1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2 */
3 /*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <linux/circ_buf.h>
34 #include <drm/drmP.h>
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39
40 /**
41 * DOC: interrupt handling
42 *
43  * These functions provide the basic support for enabling and disabling
44  * interrupt handling. There's a lot more functionality in i915_irq.c
45 * and related files, but that will be described in separate chapters.
46 */
47
48 static const u32 hpd_ibx[HPD_NUM_PINS] = {
49 [HPD_CRT] = SDE_CRT_HOTPLUG,
50 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
51 [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
52 [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
53 [HPD_PORT_D] = SDE_PORTD_HOTPLUG
54 };
55
56 static const u32 hpd_cpt[HPD_NUM_PINS] = {
57 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
58 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
59 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
60 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
61 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
62 };
63
64 static const u32 hpd_spt[HPD_NUM_PINS] = {
65 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
66 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
67 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
68 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
69 };
70
71 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
72 [HPD_CRT] = CRT_HOTPLUG_INT_EN,
73 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
74 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
75 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
76 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
77 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
78 };
79
80 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
81 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
82 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
83 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
84 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
85 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
86 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
87 };
88
89 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
90 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
91 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
92 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
93 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
94 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
95 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
96 };
97
98 /* BXT hpd list */
99 static const u32 hpd_bxt[HPD_NUM_PINS] = {
100 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
101 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
102 };
103
104 /* IIR can theoretically queue up two events. Be paranoid. */
105 #define GEN8_IRQ_RESET_NDX(type, which) do { \
106 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
107 POSTING_READ(GEN8_##type##_IMR(which)); \
108 I915_WRITE(GEN8_##type##_IER(which), 0); \
109 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
110 POSTING_READ(GEN8_##type##_IIR(which)); \
111 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
112 POSTING_READ(GEN8_##type##_IIR(which)); \
113 } while (0)
114
115 #define GEN5_IRQ_RESET(type) do { \
116 I915_WRITE(type##IMR, 0xffffffff); \
117 POSTING_READ(type##IMR); \
118 I915_WRITE(type##IER, 0); \
119 I915_WRITE(type##IIR, 0xffffffff); \
120 POSTING_READ(type##IIR); \
121 I915_WRITE(type##IIR, 0xffffffff); \
122 POSTING_READ(type##IIR); \
123 } while (0)
124
125 /*
126 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
127 */
128 #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
129 u32 val = I915_READ(reg); \
130 if (val) { \
131 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
132 (reg), val); \
133 I915_WRITE((reg), 0xffffffff); \
134 POSTING_READ(reg); \
135 I915_WRITE((reg), 0xffffffff); \
136 POSTING_READ(reg); \
137 } \
138 } while (0)
139
140 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
141 GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
142 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
143 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
144 POSTING_READ(GEN8_##type##_IMR(which)); \
145 } while (0)
146
147 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
148 GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
149 I915_WRITE(type##IER, (ier_val)); \
150 I915_WRITE(type##IMR, (imr_val)); \
151 POSTING_READ(type##IMR); \
152 } while (0)
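/*
 * Illustrative usage sketch (added for clarity, not part of the upstream
 * file): the reset/init macro pairs above are meant to be used together
 * from the IRQ preinstall/postinstall hooks, along the lines of
 *
 *	GEN5_IRQ_RESET(GT);                                  (preinstall/reset)
 *	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);   (postinstall)
 *
 * i.e. IIR is cleared twice on reset (it can queue two events), asserted
 * to be zero again at init time, and only then are IER and IMR programmed
 * with a posting read. Exact call sites and mask values are platform
 * specific.
 */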
153
154 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
155
156 /* For display hotplug interrupt */
157 void
158 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
159 {
160 assert_spin_locked(&dev_priv->irq_lock);
161
162 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
163 return;
164
165 if ((dev_priv->irq_mask & mask) != 0) {
166 dev_priv->irq_mask &= ~mask;
167 I915_WRITE(DEIMR, dev_priv->irq_mask);
168 POSTING_READ(DEIMR);
169 }
170 }
171
172 void
173 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
174 {
175 assert_spin_locked(&dev_priv->irq_lock);
176
177 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
178 return;
179
180 if ((dev_priv->irq_mask & mask) != mask) {
181 dev_priv->irq_mask |= mask;
182 I915_WRITE(DEIMR, dev_priv->irq_mask);
183 POSTING_READ(DEIMR);
184 }
185 }
186
187 /**
188 * ilk_update_gt_irq - update GTIMR
189 * @dev_priv: driver private
190 * @interrupt_mask: mask of interrupt bits to update
191 * @enabled_irq_mask: mask of interrupt bits to enable
192 */
193 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
194 uint32_t interrupt_mask,
195 uint32_t enabled_irq_mask)
196 {
197 assert_spin_locked(&dev_priv->irq_lock);
198
199 WARN_ON(enabled_irq_mask & ~interrupt_mask);
200
201 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
202 return;
203
204 dev_priv->gt_irq_mask &= ~interrupt_mask;
205 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
206 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
207 POSTING_READ(GTIMR);
208 }
209
210 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
211 {
212 ilk_update_gt_irq(dev_priv, mask, mask);
213 }
214
215 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
216 {
217 ilk_update_gt_irq(dev_priv, mask, 0);
218 }
219
220 static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
221 {
222 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
223 }
224
225 static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
226 {
227 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
228 }
229
230 static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
231 {
232 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
233 }
234
235 /**
236 * snb_update_pm_irq - update GEN6_PMIMR
237 * @dev_priv: driver private
238 * @interrupt_mask: mask of interrupt bits to update
239 * @enabled_irq_mask: mask of interrupt bits to enable
240 */
241 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
242 uint32_t interrupt_mask,
243 uint32_t enabled_irq_mask)
244 {
245 uint32_t new_val;
246
247 WARN_ON(enabled_irq_mask & ~interrupt_mask);
248
249 assert_spin_locked(&dev_priv->irq_lock);
250
251 new_val = dev_priv->pm_irq_mask;
252 new_val &= ~interrupt_mask;
253 new_val |= (~enabled_irq_mask & interrupt_mask);
254
255 if (new_val != dev_priv->pm_irq_mask) {
256 dev_priv->pm_irq_mask = new_val;
257 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
258 POSTING_READ(gen6_pm_imr(dev_priv));
259 }
260 }
261
262 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
263 {
264 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
265 return;
266
267 snb_update_pm_irq(dev_priv, mask, mask);
268 }
269
270 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
271 uint32_t mask)
272 {
273 snb_update_pm_irq(dev_priv, mask, 0);
274 }
275
276 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
277 {
278 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
279 return;
280
281 __gen6_disable_pm_irq(dev_priv, mask);
282 }
283
284 void gen6_reset_rps_interrupts(struct drm_device *dev)
285 {
286 struct drm_i915_private *dev_priv = dev->dev_private;
287 uint32_t reg = gen6_pm_iir(dev_priv);
288
289 spin_lock_irq(&dev_priv->irq_lock);
290 I915_WRITE(reg, dev_priv->pm_rps_events);
291 I915_WRITE(reg, dev_priv->pm_rps_events);
292 POSTING_READ(reg);
293 dev_priv->rps.pm_iir = 0;
294 spin_unlock_irq(&dev_priv->irq_lock);
295 }
296
297 void gen6_enable_rps_interrupts(struct drm_device *dev)
298 {
299 struct drm_i915_private *dev_priv = dev->dev_private;
300
301 spin_lock_irq(&dev_priv->irq_lock);
302
303 WARN_ON(dev_priv->rps.pm_iir);
304 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
305 dev_priv->rps.interrupts_enabled = true;
306 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
307 dev_priv->pm_rps_events);
308 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
309
310 spin_unlock_irq(&dev_priv->irq_lock);
311 }
312
313 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
314 {
315 /*
316  * SNB,IVB can hard hang, and VLV,CHV may hard hang, on a looping
317  * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
318 *
319 * TODO: verify if this can be reproduced on VLV,CHV.
320 */
321 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
322 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
323
324 if (INTEL_INFO(dev_priv)->gen >= 8)
325 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
326
327 return mask;
328 }
329
330 void gen6_disable_rps_interrupts(struct drm_device *dev)
331 {
332 struct drm_i915_private *dev_priv = dev->dev_private;
333
334 spin_lock_irq(&dev_priv->irq_lock);
335 dev_priv->rps.interrupts_enabled = false;
336 spin_unlock_irq(&dev_priv->irq_lock);
337
338 cancel_work_sync(&dev_priv->rps.work);
339
340 spin_lock_irq(&dev_priv->irq_lock);
341
342 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
343
344 __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
345 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
346 ~dev_priv->pm_rps_events);
347
348 spin_unlock_irq(&dev_priv->irq_lock);
349
350 synchronize_irq(dev->irq);
351 }
352
353 /**
354 * ibx_display_interrupt_update - update SDEIMR
355 * @dev_priv: driver private
356 * @interrupt_mask: mask of interrupt bits to update
357 * @enabled_irq_mask: mask of interrupt bits to enable
358 */
359 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
360 uint32_t interrupt_mask,
361 uint32_t enabled_irq_mask)
362 {
363 uint32_t sdeimr = I915_READ(SDEIMR);
364 sdeimr &= ~interrupt_mask;
365 sdeimr |= (~enabled_irq_mask & interrupt_mask);
366
367 WARN_ON(enabled_irq_mask & ~interrupt_mask);
368
369 assert_spin_locked(&dev_priv->irq_lock);
370
371 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
372 return;
373
374 I915_WRITE(SDEIMR, sdeimr);
375 POSTING_READ(SDEIMR);
376 }
377
378 static void
379 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
380 u32 enable_mask, u32 status_mask)
381 {
382 u32 reg = PIPESTAT(pipe);
383 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
384
385 assert_spin_locked(&dev_priv->irq_lock);
386 WARN_ON(!intel_irqs_enabled(dev_priv));
387
388 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
389 status_mask & ~PIPESTAT_INT_STATUS_MASK,
390 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
391 pipe_name(pipe), enable_mask, status_mask))
392 return;
393
394 if ((pipestat & enable_mask) == enable_mask)
395 return;
396
397 dev_priv->pipestat_irq_mask[pipe] |= status_mask;
398
399 /* Enable the interrupt, clear any pending status */
400 pipestat |= enable_mask | status_mask;
401 I915_WRITE(reg, pipestat);
402 POSTING_READ(reg);
403 }
404
405 static void
406 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
407 u32 enable_mask, u32 status_mask)
408 {
409 u32 reg = PIPESTAT(pipe);
410 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
411
412 assert_spin_locked(&dev_priv->irq_lock);
413 WARN_ON(!intel_irqs_enabled(dev_priv));
414
415 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
416 status_mask & ~PIPESTAT_INT_STATUS_MASK,
417 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
418 pipe_name(pipe), enable_mask, status_mask))
419 return;
420
421 if ((pipestat & enable_mask) == 0)
422 return;
423
424 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
425
426 pipestat &= ~enable_mask;
427 I915_WRITE(reg, pipestat);
428 POSTING_READ(reg);
429 }
430
431 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
432 {
433 u32 enable_mask = status_mask << 16;
434
435 /*
436 * On pipe A we don't support the PSR interrupt yet,
437 * on pipe B and C the same bit MBZ.
438 */
439 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
440 return 0;
441 /*
442 * On pipe B and C we don't support the PSR interrupt yet, on pipe
443 * A the same bit is for perf counters which we don't use either.
444 */
445 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
446 return 0;
447
448 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
449 SPRITE0_FLIP_DONE_INT_EN_VLV |
450 SPRITE1_FLIP_DONE_INT_EN_VLV);
451 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
452 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
453 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
454 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
455
456 return enable_mask;
457 }
458
459 void
460 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
461 u32 status_mask)
462 {
463 u32 enable_mask;
464
465 if (IS_VALLEYVIEW(dev_priv->dev))
466 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
467 status_mask);
468 else
469 enable_mask = status_mask << 16;
470 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
471 }
472
473 void
474 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
475 u32 status_mask)
476 {
477 u32 enable_mask;
478
479 if (IS_VALLEYVIEW(dev_priv->dev))
480 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
481 status_mask);
482 else
483 enable_mask = status_mask << 16;
484 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
485 }
486
487 /**
488 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
489 */
490 static void i915_enable_asle_pipestat(struct drm_device *dev)
491 {
492 struct drm_i915_private *dev_priv = dev->dev_private;
493
494 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
495 return;
496
497 spin_lock_irq(&dev_priv->irq_lock);
498
499 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
500 if (INTEL_INFO(dev)->gen >= 4)
501 i915_enable_pipestat(dev_priv, PIPE_A,
502 PIPE_LEGACY_BLC_EVENT_STATUS);
503
504 spin_unlock_irq(&dev_priv->irq_lock);
505 }
506
507 /*
508 * This timing diagram depicts the video signal in and
509 * around the vertical blanking period.
510 *
511 * Assumptions about the fictitious mode used in this example:
512 * vblank_start >= 3
513 * vsync_start = vblank_start + 1
514 * vsync_end = vblank_start + 2
515 * vtotal = vblank_start + 3
516 *
517 * start of vblank:
518 * latch double buffered registers
519 * increment frame counter (ctg+)
520 * generate start of vblank interrupt (gen4+)
521 * |
522 * | frame start:
523 * | generate frame start interrupt (aka. vblank interrupt) (gmch)
524 * | may be shifted forward 1-3 extra lines via PIPECONF
525 * | |
526 * | | start of vsync:
527 * | | generate vsync interrupt
528 * | | |
529 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
530 * . \hs/ . \hs/ \hs/ \hs/ . \hs/
531 * ----va---> <-----------------vb--------------------> <--------va-------------
532 * | | <----vs-----> |
533 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
534 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
535 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
536 * | | |
537 * last visible pixel first visible pixel
538 * | increment frame counter (gen3/4)
539 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
540 *
541 * x = horizontal active
542 * _ = horizontal blanking
543 * hs = horizontal sync
544 * va = vertical active
545 * vb = vertical blanking
546 * vs = vertical sync
547 * vbs = vblank_start (number)
548 *
549 * Summary:
550 * - most events happen at the start of horizontal sync
551 * - frame start happens at the start of horizontal blank, 1-4 lines
552 * (depending on PIPECONF settings) after the start of vblank
553 * - gen3/4 pixel and frame counter are synchronized with the start
554 * of horizontal active on the first line of vertical active
555 */
556
557 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
558 {
559 /* Gen2 doesn't have a hardware frame counter */
560 return 0;
561 }
562
563 /* Called from drm generic code, passed a 'crtc', which
564 * we use as a pipe index
565 */
566 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
567 {
568 struct drm_i915_private *dev_priv = dev->dev_private;
569 unsigned long high_frame;
570 unsigned long low_frame;
571 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
572 struct intel_crtc *intel_crtc =
573 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
574 const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
575
576 htotal = mode->crtc_htotal;
577 hsync_start = mode->crtc_hsync_start;
578 vbl_start = mode->crtc_vblank_start;
579 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
580 vbl_start = DIV_ROUND_UP(vbl_start, 2);
581
582 /* Convert to pixel count */
583 vbl_start *= htotal;
584
585 /* Start of vblank event occurs at start of hsync */
586 vbl_start -= htotal - hsync_start;
587
588 high_frame = PIPEFRAME(pipe);
589 low_frame = PIPEFRAMEPIXEL(pipe);
590
591 /*
592 * High & low register fields aren't synchronized, so make sure
593 * we get a low value that's stable across two reads of the high
594 * register.
595 */
596 do {
597 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
598 low = I915_READ(low_frame);
599 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
600 } while (high1 != high2);
601
602 high1 >>= PIPE_FRAME_HIGH_SHIFT;
603 pixel = low & PIPE_PIXEL_MASK;
604 low >>= PIPE_FRAME_LOW_SHIFT;
605
606 /*
607 * The frame counter increments at beginning of active.
608 * Cook up a vblank counter by also checking the pixel
609 * counter against vblank start.
610 */
611 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
612 }
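/*
 * Worked example for the cooked-up counter above (hypothetical numbers,
 * added for illustration): with htotal = 2200, hsync_start = 2008 and
 * vbl_start = 1125 lines, vbl_start becomes 1125 * 2200 - (2200 - 2008) =
 * 2474808 pixels. A stable read pair of frame = 41, pixel = 2500000 then
 * satisfies pixel >= vbl_start, so the function reports 42, i.e. the
 * counter behaves as if it had already incremented at the start of vblank
 * rather than at the start of active.
 */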
613
614 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
615 {
616 struct drm_i915_private *dev_priv = dev->dev_private;
617 int reg = PIPE_FRMCOUNT_GM45(pipe);
618
619 return I915_READ(reg);
620 }
621
622 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
623 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
624
625 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
626 {
627 struct drm_device *dev = crtc->base.dev;
628 struct drm_i915_private *dev_priv = dev->dev_private;
629 const struct drm_display_mode *mode = &crtc->base.hwmode;
630 enum pipe pipe = crtc->pipe;
631 int position, vtotal;
632
633 vtotal = mode->crtc_vtotal;
634 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
635 vtotal /= 2;
636
637 if (IS_GEN2(dev))
638 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
639 else
640 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
641
642 /*
643 * On HSW, the DSL reg (0x70000) appears to return 0 if we
644 * read it just before the start of vblank. So try it again
645 * so we don't accidentally end up spanning a vblank frame
646  * increment, causing the pipe_update_end() code to squawk at us.
647 *
648 * The nature of this problem means we can't simply check the ISR
649 * bit and return the vblank start value; nor can we use the scanline
650 * debug register in the transcoder as it appears to have the same
651 * problem. We may need to extend this to include other platforms,
652 * but so far testing only shows the problem on HSW.
653 */
654 if (IS_HASWELL(dev) && !position) {
655 int i, temp;
656
657 for (i = 0; i < 100; i++) {
658 udelay(1);
659 temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
660 DSL_LINEMASK_GEN3;
661 if (temp != position) {
662 position = temp;
663 break;
664 }
665 }
666 }
667
668 /*
669 * See update_scanline_offset() for the details on the
670 * scanline_offset adjustment.
671 */
672 return (position + crtc->scanline_offset) % vtotal;
673 }
674
675 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
676 unsigned int flags, int *vpos, int *hpos,
677 ktime_t *stime, ktime_t *etime)
678 {
679 struct drm_i915_private *dev_priv = dev->dev_private;
680 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
681 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
682 const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
683 int position;
684 int vbl_start, vbl_end, hsync_start, htotal, vtotal;
685 bool in_vbl = true;
686 int ret = 0;
687 unsigned long irqflags;
688
689 if (WARN_ON(!mode->crtc_clock)) {
690 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
691 "pipe %c\n", pipe_name(pipe));
692 return 0;
693 }
694
695 htotal = mode->crtc_htotal;
696 hsync_start = mode->crtc_hsync_start;
697 vtotal = mode->crtc_vtotal;
698 vbl_start = mode->crtc_vblank_start;
699 vbl_end = mode->crtc_vblank_end;
700
701 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
702 vbl_start = DIV_ROUND_UP(vbl_start, 2);
703 vbl_end /= 2;
704 vtotal /= 2;
705 }
706
707 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
708
709 /*
710 * Lock uncore.lock, as we will do multiple timing critical raw
711 * register reads, potentially with preemption disabled, so the
712 * following code must not block on uncore.lock.
713 */
714 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
715
716 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
717
718 /* Get optional system timestamp before query. */
719 if (stime)
720 *stime = ktime_get();
721
722 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
723 /* No obvious pixelcount register. Only query vertical
724 * scanout position from Display scan line register.
725 */
726 position = __intel_get_crtc_scanline(intel_crtc);
727 } else {
728 /* Have access to pixelcount since start of frame.
729 * We can split this into vertical and horizontal
730 * scanout position.
731 */
732 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
733
734 /* convert to pixel counts */
735 vbl_start *= htotal;
736 vbl_end *= htotal;
737 vtotal *= htotal;
738
739 /*
740 * In interlaced modes, the pixel counter counts all pixels,
741 * so one field will have htotal more pixels. In order to avoid
742 * the reported position from jumping backwards when the pixel
743 * counter is beyond the length of the shorter field, just
744  * clamp the position to the length of the shorter field. This
745 * matches how the scanline counter based position works since
746 * the scanline counter doesn't count the two half lines.
747 */
748 if (position >= vtotal)
749 position = vtotal - 1;
750
751 /*
752 * Start of vblank interrupt is triggered at start of hsync,
753 * just prior to the first active line of vblank. However we
754 * consider lines to start at the leading edge of horizontal
755 * active. So, should we get here before we've crossed into
756 * the horizontal active of the first line in vblank, we would
757  * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix that,
758 * always add htotal-hsync_start to the current pixel position.
759 */
760 position = (position + htotal - hsync_start) % vtotal;
761 }
762
763 /* Get optional system timestamp after query. */
764 if (etime)
765 *etime = ktime_get();
766
767 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
768
769 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
770
771 in_vbl = position >= vbl_start && position < vbl_end;
772
773 /*
774 * While in vblank, position will be negative
775 * counting up towards 0 at vbl_end. And outside
776 * vblank, position will be positive counting
777 * up since vbl_end.
778 */
779 if (position >= vbl_start)
780 position -= vbl_end;
781 else
782 position += vtotal - vbl_end;
783
784 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
785 *vpos = position;
786 *hpos = 0;
787 } else {
788 *vpos = position / htotal;
789 *hpos = position - (*vpos * htotal);
790 }
791
792 /* In vblank? */
793 if (in_vbl)
794 ret |= DRM_SCANOUTPOS_IN_VBLANK;
795
796 return ret;
797 }
798
799 int intel_get_crtc_scanline(struct intel_crtc *crtc)
800 {
801 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
802 unsigned long irqflags;
803 int position;
804
805 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
806 position = __intel_get_crtc_scanline(crtc);
807 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
808
809 return position;
810 }
811
812 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
813 int *max_error,
814 struct timeval *vblank_time,
815 unsigned flags)
816 {
817 struct drm_crtc *crtc;
818
819 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
820 DRM_ERROR("Invalid crtc %d\n", pipe);
821 return -EINVAL;
822 }
823
824 /* Get drm_crtc to timestamp: */
825 crtc = intel_get_crtc_for_pipe(dev, pipe);
826 if (crtc == NULL) {
827 DRM_ERROR("Invalid crtc %d\n", pipe);
828 return -EINVAL;
829 }
830
831 if (!crtc->hwmode.crtc_clock) {
832 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
833 return -EBUSY;
834 }
835
836 /* Helper routine in DRM core does all the work: */
837 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
838 vblank_time, flags,
839 crtc,
840 &crtc->hwmode);
841 }
842
843 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
844 {
845 struct drm_i915_private *dev_priv = dev->dev_private;
846 u32 busy_up, busy_down, max_avg, min_avg;
847 u8 new_delay;
848
849 spin_lock(&mchdev_lock);
850
851 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
852
853 new_delay = dev_priv->ips.cur_delay;
854
855 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
856 busy_up = I915_READ(RCPREVBSYTUPAVG);
857 busy_down = I915_READ(RCPREVBSYTDNAVG);
858 max_avg = I915_READ(RCBMAXAVG);
859 min_avg = I915_READ(RCBMINAVG);
860
861 /* Handle RCS change request from hw */
862 if (busy_up > max_avg) {
863 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
864 new_delay = dev_priv->ips.cur_delay - 1;
865 if (new_delay < dev_priv->ips.max_delay)
866 new_delay = dev_priv->ips.max_delay;
867 } else if (busy_down < min_avg) {
868 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
869 new_delay = dev_priv->ips.cur_delay + 1;
870 if (new_delay > dev_priv->ips.min_delay)
871 new_delay = dev_priv->ips.min_delay;
872 }
873
874 if (ironlake_set_drps(dev, new_delay))
875 dev_priv->ips.cur_delay = new_delay;
876
877 spin_unlock(&mchdev_lock);
878
879 return;
880 }
881
882 static void notify_ring(struct intel_engine_cs *ring)
883 {
884 if (!intel_ring_initialized(ring))
885 return;
886
887 trace_i915_gem_request_notify(ring);
888
889 wake_up_all(&ring->irq_queue);
890 }
891
892 static void vlv_c0_read(struct drm_i915_private *dev_priv,
893 struct intel_rps_ei *ei)
894 {
895 ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
896 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
897 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
898 }
899
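/*
 * Interpretation note (added for illustration): the comparison below scales
 * both sides into the same units, so vlv_c0_above() effectively returns
 * true when the combined render + media C0 residency over the evaluation
 * interval exceeds roughly <threshold> percent of the elapsed CZ-clock
 * time. vlv_wa_c0_ei() further down uses this to synthesize the up/down
 * threshold events from the EI-expired interrupts.
 */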
900 static bool vlv_c0_above(struct drm_i915_private *dev_priv,
901 const struct intel_rps_ei *old,
902 const struct intel_rps_ei *now,
903 int threshold)
904 {
905 u64 time, c0;
906
907 if (old->cz_clock == 0)
908 return false;
909
910 time = now->cz_clock - old->cz_clock;
911 time *= threshold * dev_priv->mem_freq;
912
913 /* Workload can be split between render + media, e.g. SwapBuffers
914 * being blitted in X after being rendered in mesa. To account for
915 * this we need to combine both engines into our activity counter.
916 */
917 c0 = now->render_c0 - old->render_c0;
918 c0 += now->media_c0 - old->media_c0;
919 c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;
920
921 return c0 >= time;
922 }
923
924 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
925 {
926 vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
927 dev_priv->rps.up_ei = dev_priv->rps.down_ei;
928 }
929
930 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
931 {
932 struct intel_rps_ei now;
933 u32 events = 0;
934
935 if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
936 return 0;
937
938 vlv_c0_read(dev_priv, &now);
939 if (now.cz_clock == 0)
940 return 0;
941
942 if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
943 if (!vlv_c0_above(dev_priv,
944 &dev_priv->rps.down_ei, &now,
945 dev_priv->rps.down_threshold))
946 events |= GEN6_PM_RP_DOWN_THRESHOLD;
947 dev_priv->rps.down_ei = now;
948 }
949
950 if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
951 if (vlv_c0_above(dev_priv,
952 &dev_priv->rps.up_ei, &now,
953 dev_priv->rps.up_threshold))
954 events |= GEN6_PM_RP_UP_THRESHOLD;
955 dev_priv->rps.up_ei = now;
956 }
957
958 return events;
959 }
960
961 static bool any_waiters(struct drm_i915_private *dev_priv)
962 {
963 struct intel_engine_cs *ring;
964 int i;
965
966 for_each_ring(ring, dev_priv, i)
967 if (ring->irq_refcount)
968 return true;
969
970 return false;
971 }
972
973 static void gen6_pm_rps_work(struct work_struct *work)
974 {
975 struct drm_i915_private *dev_priv =
976 container_of(work, struct drm_i915_private, rps.work);
977 bool client_boost;
978 int new_delay, adj, min, max;
979 u32 pm_iir;
980
981 spin_lock_irq(&dev_priv->irq_lock);
982         /* Speed up work cancellation while disabling rps interrupts. */
983 if (!dev_priv->rps.interrupts_enabled) {
984 spin_unlock_irq(&dev_priv->irq_lock);
985 return;
986 }
987 pm_iir = dev_priv->rps.pm_iir;
988 dev_priv->rps.pm_iir = 0;
989 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
990 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
991 client_boost = dev_priv->rps.client_boost;
992 dev_priv->rps.client_boost = false;
993 spin_unlock_irq(&dev_priv->irq_lock);
994
995 /* Make sure we didn't queue anything we're not going to process. */
996 WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
997
998 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
999 return;
1000
1001 mutex_lock(&dev_priv->rps.hw_lock);
1002
1003 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1004
1005 adj = dev_priv->rps.last_adj;
1006 new_delay = dev_priv->rps.cur_freq;
1007 min = dev_priv->rps.min_freq_softlimit;
1008 max = dev_priv->rps.max_freq_softlimit;
1009
1010 if (client_boost) {
1011 new_delay = dev_priv->rps.max_freq_softlimit;
1012 adj = 0;
1013 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1014 if (adj > 0)
1015 adj *= 2;
1016 else /* CHV needs even encode values */
1017 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1018 /*
1019 * For better performance, jump directly
1020 * to RPe if we're below it.
1021 */
1022 if (new_delay < dev_priv->rps.efficient_freq - adj) {
1023 new_delay = dev_priv->rps.efficient_freq;
1024 adj = 0;
1025 }
1026 } else if (any_waiters(dev_priv)) {
1027 adj = 0;
1028 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1029 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1030 new_delay = dev_priv->rps.efficient_freq;
1031 else
1032 new_delay = dev_priv->rps.min_freq_softlimit;
1033 adj = 0;
1034 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1035 if (adj < 0)
1036 adj *= 2;
1037 else /* CHV needs even encode values */
1038 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1039 } else { /* unknown event */
1040 adj = 0;
1041 }
1042
1043 dev_priv->rps.last_adj = adj;
1044
1045 /* sysfs frequency interfaces may have snuck in while servicing the
1046 * interrupt
1047 */
1048 new_delay += adj;
1049 new_delay = clamp_t(int, new_delay, min, max);
1050
1051 intel_set_rps(dev_priv->dev, new_delay);
1052
1053 mutex_unlock(&dev_priv->rps.hw_lock);
1054 }
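/*
 * Example of the adjustment logic above (hypothetical numbers, added for
 * illustration): with cur_freq = 10, last_adj = 2 and an UP_THRESHOLD
 * event, adj doubles to 4 and, assuming we are already at or above RPe,
 * the new request becomes clamp(10 + 4, min_freq_softlimit,
 * max_freq_softlimit). A DOWN_TIMEOUT event would instead drop straight
 * to the efficient frequency (RPe) when running above it and reset the
 * adjustment to 0.
 */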
1055
1056
1057 /**
1058 * ivybridge_parity_work - Workqueue called when a parity error interrupt
1059 * occurred.
1060 * @work: workqueue struct
1061 *
1062 * Doesn't actually do anything except notify userspace. As a consequence of
1063  * this event, userspace should try to remap the bad rows, since
1064  * statistically the same row is more likely to go bad again.
1065 */
1066 static void ivybridge_parity_work(struct work_struct *work)
1067 {
1068 struct drm_i915_private *dev_priv =
1069 container_of(work, struct drm_i915_private, l3_parity.error_work);
1070 u32 error_status, row, bank, subbank;
1071 char *parity_event[6];
1072 uint32_t misccpctl;
1073 uint8_t slice = 0;
1074
1075 /* We must turn off DOP level clock gating to access the L3 registers.
1076 * In order to prevent a get/put style interface, acquire struct mutex
1077 * any time we access those registers.
1078 */
1079 mutex_lock(&dev_priv->dev->struct_mutex);
1080
1081 /* If we've screwed up tracking, just let the interrupt fire again */
1082 if (WARN_ON(!dev_priv->l3_parity.which_slice))
1083 goto out;
1084
1085 misccpctl = I915_READ(GEN7_MISCCPCTL);
1086 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1087 POSTING_READ(GEN7_MISCCPCTL);
1088
1089 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1090 u32 reg;
1091
1092 slice--;
1093 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1094 break;
1095
1096 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1097
1098 reg = GEN7_L3CDERRST1 + (slice * 0x200);
1099
1100 error_status = I915_READ(reg);
1101 row = GEN7_PARITY_ERROR_ROW(error_status);
1102 bank = GEN7_PARITY_ERROR_BANK(error_status);
1103 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1104
1105 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1106 POSTING_READ(reg);
1107
1108 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1109 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1110 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1111 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1112 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1113 parity_event[5] = NULL;
1114
1115 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1116 KOBJ_CHANGE, parity_event);
1117
1118 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1119 slice, row, bank, subbank);
1120
1121 kfree(parity_event[4]);
1122 kfree(parity_event[3]);
1123 kfree(parity_event[2]);
1124 kfree(parity_event[1]);
1125 }
1126
1127 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1128
1129 out:
1130 WARN_ON(dev_priv->l3_parity.which_slice);
1131 spin_lock_irq(&dev_priv->irq_lock);
1132 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1133 spin_unlock_irq(&dev_priv->irq_lock);
1134
1135 mutex_unlock(&dev_priv->dev->struct_mutex);
1136 }
1137
1138 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1139 {
1140 struct drm_i915_private *dev_priv = dev->dev_private;
1141
1142 if (!HAS_L3_DPF(dev))
1143 return;
1144
1145 spin_lock(&dev_priv->irq_lock);
1146 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1147 spin_unlock(&dev_priv->irq_lock);
1148
1149 iir &= GT_PARITY_ERROR(dev);
1150 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1151 dev_priv->l3_parity.which_slice |= 1 << 1;
1152
1153 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1154 dev_priv->l3_parity.which_slice |= 1 << 0;
1155
1156 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1157 }
1158
1159 static void ilk_gt_irq_handler(struct drm_device *dev,
1160 struct drm_i915_private *dev_priv,
1161 u32 gt_iir)
1162 {
1163 if (gt_iir &
1164 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1165 notify_ring(&dev_priv->ring[RCS]);
1166 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1167 notify_ring(&dev_priv->ring[VCS]);
1168 }
1169
1170 static void snb_gt_irq_handler(struct drm_device *dev,
1171 struct drm_i915_private *dev_priv,
1172 u32 gt_iir)
1173 {
1174
1175 if (gt_iir &
1176 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1177 notify_ring(&dev_priv->ring[RCS]);
1178 if (gt_iir & GT_BSD_USER_INTERRUPT)
1179 notify_ring(&dev_priv->ring[VCS]);
1180 if (gt_iir & GT_BLT_USER_INTERRUPT)
1181 notify_ring(&dev_priv->ring[BCS]);
1182
1183 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1184 GT_BSD_CS_ERROR_INTERRUPT |
1185 GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1186 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1187
1188 if (gt_iir & GT_PARITY_ERROR(dev))
1189 ivybridge_parity_error_irq_handler(dev, gt_iir);
1190 }
1191
1192 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1193 u32 master_ctl)
1194 {
1195 irqreturn_t ret = IRQ_NONE;
1196
1197 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1198 u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
1199 if (tmp) {
1200 I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
1201 ret = IRQ_HANDLED;
1202
1203 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1204 intel_lrc_irq_handler(&dev_priv->ring[RCS]);
1205 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1206 notify_ring(&dev_priv->ring[RCS]);
1207
1208 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1209 intel_lrc_irq_handler(&dev_priv->ring[BCS]);
1210 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1211 notify_ring(&dev_priv->ring[BCS]);
1212 } else
1213 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1214 }
1215
1216 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1217 u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
1218 if (tmp) {
1219 I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
1220 ret = IRQ_HANDLED;
1221
1222 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1223 intel_lrc_irq_handler(&dev_priv->ring[VCS]);
1224 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1225 notify_ring(&dev_priv->ring[VCS]);
1226
1227 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1228 intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
1229 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1230 notify_ring(&dev_priv->ring[VCS2]);
1231 } else
1232 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1233 }
1234
1235 if (master_ctl & GEN8_GT_VECS_IRQ) {
1236 u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
1237 if (tmp) {
1238 I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
1239 ret = IRQ_HANDLED;
1240
1241 if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1242 intel_lrc_irq_handler(&dev_priv->ring[VECS]);
1243 if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1244 notify_ring(&dev_priv->ring[VECS]);
1245 } else
1246 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1247 }
1248
1249 if (master_ctl & GEN8_GT_PM_IRQ) {
1250 u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
1251 if (tmp & dev_priv->pm_rps_events) {
1252 I915_WRITE_FW(GEN8_GT_IIR(2),
1253 tmp & dev_priv->pm_rps_events);
1254 ret = IRQ_HANDLED;
1255 gen6_rps_irq_handler(dev_priv, tmp);
1256 } else
1257 DRM_ERROR("The master control interrupt lied (PM)!\n");
1258 }
1259
1260 return ret;
1261 }
1262
1263 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1264 {
1265 switch (port) {
1266 case PORT_A:
1267 return val & BXT_PORTA_HOTPLUG_LONG_DETECT;
1268 case PORT_B:
1269 return val & PORTB_HOTPLUG_LONG_DETECT;
1270 case PORT_C:
1271 return val & PORTC_HOTPLUG_LONG_DETECT;
1272 case PORT_D:
1273 return val & PORTD_HOTPLUG_LONG_DETECT;
1274 default:
1275 return false;
1276 }
1277 }
1278
1279 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1280 {
1281 switch (port) {
1282 case PORT_B:
1283 return val & PORTB_HOTPLUG_LONG_DETECT;
1284 case PORT_C:
1285 return val & PORTC_HOTPLUG_LONG_DETECT;
1286 case PORT_D:
1287 return val & PORTD_HOTPLUG_LONG_DETECT;
1288 case PORT_E:
1289 return val & PORTE_HOTPLUG_LONG_DETECT;
1290 default:
1291 return false;
1292 }
1293 }
1294
1295 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1296 {
1297 switch (port) {
1298 case PORT_B:
1299 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1300 case PORT_C:
1301 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1302 case PORT_D:
1303 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1304 default:
1305 return false;
1306 }
1307 }
1308
1309 /* Get a bit mask of pins that have triggered, and which ones may be long. */
1310 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1311 u32 hotplug_trigger, u32 dig_hotplug_reg,
1312 const u32 hpd[HPD_NUM_PINS],
1313 bool long_pulse_detect(enum port port, u32 val))
1314 {
1315 enum port port;
1316 int i;
1317
1318 *pin_mask = 0;
1319 *long_mask = 0;
1320
1321 for_each_hpd_pin(i) {
1322 if ((hpd[i] & hotplug_trigger) == 0)
1323 continue;
1324
1325 *pin_mask |= BIT(i);
1326
1327 if (!intel_hpd_pin_to_port(i, &port))
1328 continue;
1329
1330 if (long_pulse_detect(port, dig_hotplug_reg))
1331 *long_mask |= BIT(i);
1332 }
1333
1334 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1335 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1336
1337 }
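/*
 * Worked example (hypothetical register values, added for illustration):
 * if hotplug_trigger has the bits for HPD_PORT_B and HPD_PORT_D set in
 * the hpd[] table in use, *pin_mask ends up as
 * BIT(HPD_PORT_B) | BIT(HPD_PORT_D); if dig_hotplug_reg then reports a
 * long pulse only for PORT_D, *long_mask is just BIT(HPD_PORT_D) and
 * port B is treated as a short pulse by intel_hpd_irq_handler().
 */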
1338
1339 static void gmbus_irq_handler(struct drm_device *dev)
1340 {
1341 struct drm_i915_private *dev_priv = dev->dev_private;
1342
1343 wake_up_all(&dev_priv->gmbus_wait_queue);
1344 }
1345
1346 static void dp_aux_irq_handler(struct drm_device *dev)
1347 {
1348 struct drm_i915_private *dev_priv = dev->dev_private;
1349
1350 wake_up_all(&dev_priv->gmbus_wait_queue);
1351 }
1352
1353 #if defined(CONFIG_DEBUG_FS)
1354 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1355 uint32_t crc0, uint32_t crc1,
1356 uint32_t crc2, uint32_t crc3,
1357 uint32_t crc4)
1358 {
1359 struct drm_i915_private *dev_priv = dev->dev_private;
1360 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1361 struct intel_pipe_crc_entry *entry;
1362 int head, tail;
1363
1364 spin_lock(&pipe_crc->lock);
1365
1366 if (!pipe_crc->entries) {
1367 spin_unlock(&pipe_crc->lock);
1368 DRM_DEBUG_KMS("spurious interrupt\n");
1369 return;
1370 }
1371
1372 head = pipe_crc->head;
1373 tail = pipe_crc->tail;
1374
1375 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1376 spin_unlock(&pipe_crc->lock);
1377 DRM_ERROR("CRC buffer overflowing\n");
1378 return;
1379 }
1380
1381 entry = &pipe_crc->entries[head];
1382
1383 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1384 entry->crc[0] = crc0;
1385 entry->crc[1] = crc1;
1386 entry->crc[2] = crc2;
1387 entry->crc[3] = crc3;
1388 entry->crc[4] = crc4;
1389
1390 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1391 pipe_crc->head = head;
1392
1393 spin_unlock(&pipe_crc->lock);
1394
1395 wake_up_interruptible(&pipe_crc->wq);
1396 }
1397 #else
1398 static inline void
1399 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1400 uint32_t crc0, uint32_t crc1,
1401 uint32_t crc2, uint32_t crc3,
1402 uint32_t crc4) {}
1403 #endif
1404
1405
1406 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1407 {
1408 struct drm_i915_private *dev_priv = dev->dev_private;
1409
1410 display_pipe_crc_irq_handler(dev, pipe,
1411 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1412 0, 0, 0, 0);
1413 }
1414
1415 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1416 {
1417 struct drm_i915_private *dev_priv = dev->dev_private;
1418
1419 display_pipe_crc_irq_handler(dev, pipe,
1420 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1421 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1422 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1423 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1424 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1425 }
1426
1427 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1428 {
1429 struct drm_i915_private *dev_priv = dev->dev_private;
1430 uint32_t res1, res2;
1431
1432 if (INTEL_INFO(dev)->gen >= 3)
1433 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1434 else
1435 res1 = 0;
1436
1437 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1438 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1439 else
1440 res2 = 0;
1441
1442 display_pipe_crc_irq_handler(dev, pipe,
1443 I915_READ(PIPE_CRC_RES_RED(pipe)),
1444 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1445 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1446 res1, res2);
1447 }
1448
1449 /* The RPS events need forcewake, so we add them to a work queue and mask their
1450 * IMR bits until the work is done. Other interrupts can be processed without
1451 * the work queue. */
1452 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1453 {
1454 if (pm_iir & dev_priv->pm_rps_events) {
1455 spin_lock(&dev_priv->irq_lock);
1456 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1457 if (dev_priv->rps.interrupts_enabled) {
1458 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1459 queue_work(dev_priv->wq, &dev_priv->rps.work);
1460 }
1461 spin_unlock(&dev_priv->irq_lock);
1462 }
1463
1464 if (INTEL_INFO(dev_priv)->gen >= 8)
1465 return;
1466
1467 if (HAS_VEBOX(dev_priv->dev)) {
1468 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1469 notify_ring(&dev_priv->ring[VECS]);
1470
1471 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1472 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1473 }
1474 }
1475
1476 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1477 {
1478 if (!drm_handle_vblank(dev, pipe))
1479 return false;
1480
1481 return true;
1482 }
1483
1484 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1485 {
1486 struct drm_i915_private *dev_priv = dev->dev_private;
1487 u32 pipe_stats[I915_MAX_PIPES] = { };
1488 int pipe;
1489
1490 spin_lock(&dev_priv->irq_lock);
1491 for_each_pipe(dev_priv, pipe) {
1492 int reg;
1493 u32 mask, iir_bit = 0;
1494
1495 /*
1496 * PIPESTAT bits get signalled even when the interrupt is
1497 * disabled with the mask bits, and some of the status bits do
1498 * not generate interrupts at all (like the underrun bit). Hence
1499 * we need to be careful that we only handle what we want to
1500 * handle.
1501 */
1502
1503                 /* fifo underruns are filtered in the underrun handler. */
1504 mask = PIPE_FIFO_UNDERRUN_STATUS;
1505
1506 switch (pipe) {
1507 case PIPE_A:
1508 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1509 break;
1510 case PIPE_B:
1511 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1512 break;
1513 case PIPE_C:
1514 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1515 break;
1516 }
1517 if (iir & iir_bit)
1518 mask |= dev_priv->pipestat_irq_mask[pipe];
1519
1520 if (!mask)
1521 continue;
1522
1523 reg = PIPESTAT(pipe);
1524 mask |= PIPESTAT_INT_ENABLE_MASK;
1525 pipe_stats[pipe] = I915_READ(reg) & mask;
1526
1527 /*
1528 * Clear the PIPE*STAT regs before the IIR
1529 */
1530 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1531 PIPESTAT_INT_STATUS_MASK))
1532 I915_WRITE(reg, pipe_stats[pipe]);
1533 }
1534 spin_unlock(&dev_priv->irq_lock);
1535
1536 for_each_pipe(dev_priv, pipe) {
1537 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1538 intel_pipe_handle_vblank(dev, pipe))
1539 intel_check_page_flip(dev, pipe);
1540
1541 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1542 intel_prepare_page_flip(dev, pipe);
1543 intel_finish_page_flip(dev, pipe);
1544 }
1545
1546 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1547 i9xx_pipe_crc_irq_handler(dev, pipe);
1548
1549 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1550 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1551 }
1552
1553 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1554 gmbus_irq_handler(dev);
1555 }
1556
1557 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1558 {
1559 struct drm_i915_private *dev_priv = dev->dev_private;
1560 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1561 u32 pin_mask, long_mask;
1562
1563 if (!hotplug_status)
1564 return;
1565
1566 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1567 /*
1568 * Make sure hotplug status is cleared before we clear IIR, or else we
1569 * may miss hotplug events.
1570 */
1571 POSTING_READ(PORT_HOTPLUG_STAT);
1572
1573 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
1574 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1575
1576 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1577 hotplug_trigger, hpd_status_g4x,
1578 i9xx_port_hotplug_long_detect);
1579 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1580
1581 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1582 dp_aux_irq_handler(dev);
1583 } else {
1584 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1585
1586 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1587 hotplug_trigger, hpd_status_i915,
1588 i9xx_port_hotplug_long_detect);
1589 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1590 }
1591 }
1592
1593 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1594 {
1595 struct drm_device *dev = arg;
1596 struct drm_i915_private *dev_priv = dev->dev_private;
1597 u32 iir, gt_iir, pm_iir;
1598 irqreturn_t ret = IRQ_NONE;
1599
1600 if (!intel_irqs_enabled(dev_priv))
1601 return IRQ_NONE;
1602
1603 while (true) {
1604 /* Find, clear, then process each source of interrupt */
1605
1606 gt_iir = I915_READ(GTIIR);
1607 if (gt_iir)
1608 I915_WRITE(GTIIR, gt_iir);
1609
1610 pm_iir = I915_READ(GEN6_PMIIR);
1611 if (pm_iir)
1612 I915_WRITE(GEN6_PMIIR, pm_iir);
1613
1614 iir = I915_READ(VLV_IIR);
1615 if (iir) {
1616 /* Consume port before clearing IIR or we'll miss events */
1617 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1618 i9xx_hpd_irq_handler(dev);
1619 I915_WRITE(VLV_IIR, iir);
1620 }
1621
1622 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1623 goto out;
1624
1625 ret = IRQ_HANDLED;
1626
1627 if (gt_iir)
1628 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1629 if (pm_iir)
1630 gen6_rps_irq_handler(dev_priv, pm_iir);
1631 /* Call regardless, as some status bits might not be
1632 * signalled in iir */
1633 valleyview_pipestat_irq_handler(dev, iir);
1634 }
1635
1636 out:
1637 return ret;
1638 }
1639
1640 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1641 {
1642 struct drm_device *dev = arg;
1643 struct drm_i915_private *dev_priv = dev->dev_private;
1644 u32 master_ctl, iir;
1645 irqreturn_t ret = IRQ_NONE;
1646
1647 if (!intel_irqs_enabled(dev_priv))
1648 return IRQ_NONE;
1649
1650 for (;;) {
1651 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1652 iir = I915_READ(VLV_IIR);
1653
1654 if (master_ctl == 0 && iir == 0)
1655 break;
1656
1657 ret = IRQ_HANDLED;
1658
1659 I915_WRITE(GEN8_MASTER_IRQ, 0);
1660
1661 /* Find, clear, then process each source of interrupt */
1662
1663 if (iir) {
1664 /* Consume port before clearing IIR or we'll miss events */
1665 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1666 i9xx_hpd_irq_handler(dev);
1667 I915_WRITE(VLV_IIR, iir);
1668 }
1669
1670 gen8_gt_irq_handler(dev_priv, master_ctl);
1671
1672 /* Call regardless, as some status bits might not be
1673 * signalled in iir */
1674 valleyview_pipestat_irq_handler(dev, iir);
1675
1676 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1677 POSTING_READ(GEN8_MASTER_IRQ);
1678 }
1679
1680 return ret;
1681 }
1682
1683 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1684 {
1685 struct drm_i915_private *dev_priv = dev->dev_private;
1686 int pipe;
1687 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1688
1689 if (hotplug_trigger) {
1690 u32 dig_hotplug_reg, pin_mask, long_mask;
1691
1692 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1693 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1694
1695 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1696 dig_hotplug_reg, hpd_ibx,
1697 pch_port_hotplug_long_detect);
1698 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1699 }
1700
1701 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1702 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1703 SDE_AUDIO_POWER_SHIFT);
1704 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1705 port_name(port));
1706 }
1707
1708 if (pch_iir & SDE_AUX_MASK)
1709 dp_aux_irq_handler(dev);
1710
1711 if (pch_iir & SDE_GMBUS)
1712 gmbus_irq_handler(dev);
1713
1714 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1715 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1716
1717 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1718 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1719
1720 if (pch_iir & SDE_POISON)
1721 DRM_ERROR("PCH poison interrupt\n");
1722
1723 if (pch_iir & SDE_FDI_MASK)
1724 for_each_pipe(dev_priv, pipe)
1725 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1726 pipe_name(pipe),
1727 I915_READ(FDI_RX_IIR(pipe)));
1728
1729 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1730 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1731
1732 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1733 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1734
1735 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1736 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1737
1738 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1739 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1740 }
1741
1742 static void ivb_err_int_handler(struct drm_device *dev)
1743 {
1744 struct drm_i915_private *dev_priv = dev->dev_private;
1745 u32 err_int = I915_READ(GEN7_ERR_INT);
1746 enum pipe pipe;
1747
1748 if (err_int & ERR_INT_POISON)
1749 DRM_ERROR("Poison interrupt\n");
1750
1751 for_each_pipe(dev_priv, pipe) {
1752 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1753 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1754
1755 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1756 if (IS_IVYBRIDGE(dev))
1757 ivb_pipe_crc_irq_handler(dev, pipe);
1758 else
1759 hsw_pipe_crc_irq_handler(dev, pipe);
1760 }
1761 }
1762
1763 I915_WRITE(GEN7_ERR_INT, err_int);
1764 }
1765
1766 static void cpt_serr_int_handler(struct drm_device *dev)
1767 {
1768 struct drm_i915_private *dev_priv = dev->dev_private;
1769 u32 serr_int = I915_READ(SERR_INT);
1770
1771 if (serr_int & SERR_INT_POISON)
1772 DRM_ERROR("PCH poison interrupt\n");
1773
1774 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1775 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1776
1777 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1778 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1779
1780 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1781 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1782
1783 I915_WRITE(SERR_INT, serr_int);
1784 }
1785
1786 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1787 {
1788 struct drm_i915_private *dev_priv = dev->dev_private;
1789 int pipe;
1790 u32 hotplug_trigger;
1791
1792 if (HAS_PCH_SPT(dev))
1793 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT;
1794 else
1795 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1796
1797 if (hotplug_trigger) {
1798 u32 dig_hotplug_reg, pin_mask, long_mask;
1799
1800 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1801 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1802
1803 if (HAS_PCH_SPT(dev)) {
1804 intel_get_hpd_pins(&pin_mask, &long_mask,
1805 hotplug_trigger,
1806 dig_hotplug_reg, hpd_spt,
1807 pch_port_hotplug_long_detect);
1808
1809 /* detect PORTE HP event */
1810 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1811 if (pch_port_hotplug_long_detect(PORT_E,
1812 dig_hotplug_reg))
1813 long_mask |= 1 << HPD_PORT_E;
1814 } else
1815 intel_get_hpd_pins(&pin_mask, &long_mask,
1816 hotplug_trigger,
1817 dig_hotplug_reg, hpd_cpt,
1818 pch_port_hotplug_long_detect);
1819
1820 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1821 }
1822
1823 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1824 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1825 SDE_AUDIO_POWER_SHIFT_CPT);
1826 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1827 port_name(port));
1828 }
1829
1830 if (pch_iir & SDE_AUX_MASK_CPT)
1831 dp_aux_irq_handler(dev);
1832
1833 if (pch_iir & SDE_GMBUS_CPT)
1834 gmbus_irq_handler(dev);
1835
1836 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1837 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1838
1839 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1840 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1841
1842 if (pch_iir & SDE_FDI_MASK_CPT)
1843 for_each_pipe(dev_priv, pipe)
1844 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1845 pipe_name(pipe),
1846 I915_READ(FDI_RX_IIR(pipe)));
1847
1848 if (pch_iir & SDE_ERROR_CPT)
1849 cpt_serr_int_handler(dev);
1850 }
1851
1852 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1853 {
1854 struct drm_i915_private *dev_priv = dev->dev_private;
1855 enum pipe pipe;
1856
1857 if (de_iir & DE_AUX_CHANNEL_A)
1858 dp_aux_irq_handler(dev);
1859
1860 if (de_iir & DE_GSE)
1861 intel_opregion_asle_intr(dev);
1862
1863 if (de_iir & DE_POISON)
1864 DRM_ERROR("Poison interrupt\n");
1865
1866 for_each_pipe(dev_priv, pipe) {
1867 if (de_iir & DE_PIPE_VBLANK(pipe) &&
1868 intel_pipe_handle_vblank(dev, pipe))
1869 intel_check_page_flip(dev, pipe);
1870
1871 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1872 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1873
1874 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1875 i9xx_pipe_crc_irq_handler(dev, pipe);
1876
1877 /* plane/pipes map 1:1 on ilk+ */
1878 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1879 intel_prepare_page_flip(dev, pipe);
1880 intel_finish_page_flip_plane(dev, pipe);
1881 }
1882 }
1883
1884 /* check event from PCH */
1885 if (de_iir & DE_PCH_EVENT) {
1886 u32 pch_iir = I915_READ(SDEIIR);
1887
1888 if (HAS_PCH_CPT(dev))
1889 cpt_irq_handler(dev, pch_iir);
1890 else
1891 ibx_irq_handler(dev, pch_iir);
1892
1893 /* should clear PCH hotplug event before clearing the CPU irq */
1894 I915_WRITE(SDEIIR, pch_iir);
1895 }
1896
1897 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1898 ironlake_rps_change_irq_handler(dev);
1899 }
1900
1901 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1902 {
1903 struct drm_i915_private *dev_priv = dev->dev_private;
1904 enum pipe pipe;
1905
1906 if (de_iir & DE_ERR_INT_IVB)
1907 ivb_err_int_handler(dev);
1908
1909 if (de_iir & DE_AUX_CHANNEL_A_IVB)
1910 dp_aux_irq_handler(dev);
1911
1912 if (de_iir & DE_GSE_IVB)
1913 intel_opregion_asle_intr(dev);
1914
1915 for_each_pipe(dev_priv, pipe) {
1916 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
1917 intel_pipe_handle_vblank(dev, pipe))
1918 intel_check_page_flip(dev, pipe);
1919
1920 /* plane/pipes map 1:1 on ilk+ */
1921 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
1922 intel_prepare_page_flip(dev, pipe);
1923 intel_finish_page_flip_plane(dev, pipe);
1924 }
1925 }
1926
1927 /* check event from PCH */
1928 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1929 u32 pch_iir = I915_READ(SDEIIR);
1930
1931 cpt_irq_handler(dev, pch_iir);
1932
1933 /* clear PCH hotplug event before clearing the CPU irq */
1934 I915_WRITE(SDEIIR, pch_iir);
1935 }
1936 }
1937
1938 /*
1939 * To handle irqs with the minimum potential races with fresh interrupts, we:
1940 * 1 - Disable Master Interrupt Control.
1941 * 2 - Find the source(s) of the interrupt.
1942 * 3 - Clear the Interrupt Identity bits (IIR).
1943 * 4 - Process the interrupt(s) that had bits set in the IIRs.
1944 * 5 - Re-enable Master Interrupt Control.
1945 */
1946 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1947 {
1948 struct drm_device *dev = arg;
1949 struct drm_i915_private *dev_priv = dev->dev_private;
1950 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1951 irqreturn_t ret = IRQ_NONE;
1952
1953 if (!intel_irqs_enabled(dev_priv))
1954 return IRQ_NONE;
1955
1956 /* We get interrupts on unclaimed registers, so check for this before we
1957 * do any I915_{READ,WRITE}. */
1958 intel_uncore_check_errors(dev);
1959
1960 /* disable master interrupt before clearing iir */
1961 de_ier = I915_READ(DEIER);
1962 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1963 POSTING_READ(DEIER);
1964
1965 /* Disable south interrupts. We'll only write to SDEIIR once, so further
1966 * interrupts will be stored on its back queue, and then we'll be
1967 * able to process them after we restore SDEIER (as soon as we restore
1968 * it, we'll get an interrupt if SDEIIR still has something to process
1969 * due to its back queue). */
1970 if (!HAS_PCH_NOP(dev)) {
1971 sde_ier = I915_READ(SDEIER);
1972 I915_WRITE(SDEIER, 0);
1973 POSTING_READ(SDEIER);
1974 }
1975
1976 /* Find, clear, then process each source of interrupt */
1977
1978 gt_iir = I915_READ(GTIIR);
1979 if (gt_iir) {
1980 I915_WRITE(GTIIR, gt_iir);
1981 ret = IRQ_HANDLED;
1982 if (INTEL_INFO(dev)->gen >= 6)
1983 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1984 else
1985 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1986 }
1987
1988 de_iir = I915_READ(DEIIR);
1989 if (de_iir) {
1990 I915_WRITE(DEIIR, de_iir);
1991 ret = IRQ_HANDLED;
1992 if (INTEL_INFO(dev)->gen >= 7)
1993 ivb_display_irq_handler(dev, de_iir);
1994 else
1995 ilk_display_irq_handler(dev, de_iir);
1996 }
1997
1998 if (INTEL_INFO(dev)->gen >= 6) {
1999 u32 pm_iir = I915_READ(GEN6_PMIIR);
2000 if (pm_iir) {
2001 I915_WRITE(GEN6_PMIIR, pm_iir);
2002 ret = IRQ_HANDLED;
2003 gen6_rps_irq_handler(dev_priv, pm_iir);
2004 }
2005 }
2006
2007 I915_WRITE(DEIER, de_ier);
2008 POSTING_READ(DEIER);
2009 if (!HAS_PCH_NOP(dev)) {
2010 I915_WRITE(SDEIER, sde_ier);
2011 POSTING_READ(SDEIER);
2012 }
2013
2014 return ret;
2015 }
2016
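/*
 * Broxton hotplug: decode the BXT_DE_PORT_HOTPLUG trigger bits against
 * BXT_HOTPLUG_CTL, clear the sticky status and pass the pin/long-pulse
 * masks on to the HPD core.
 */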
2017 static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
2018 {
2019 struct drm_i915_private *dev_priv = dev->dev_private;
2020 u32 hp_control, hp_trigger;
2021 u32 pin_mask, long_mask;
2022
2023 /* Get the status */
2024 hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
2025 hp_control = I915_READ(BXT_HOTPLUG_CTL);
2026
2027 /* Hotplug not enabled? */
2028 if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
2029 DRM_ERROR("Interrupt when HPD disabled\n");
2030 return;
2031 }
2032
2033 /* Clear sticky bits in hpd status */
2034 I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
2035
2036 intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
2037 hpd_bxt, bxt_port_hotplug_long_detect);
2038 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2039 }
2040
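/*
 * Top-level interrupt handler for gen8+. Same scheme as above: disable the
 * master control, find/clear/process the GT, DE misc, DE port, per-pipe and
 * PCH sources, then re-enable the master control.
 */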
2041 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2042 {
2043 struct drm_device *dev = arg;
2044 struct drm_i915_private *dev_priv = dev->dev_private;
2045 u32 master_ctl;
2046 irqreturn_t ret = IRQ_NONE;
2047 uint32_t tmp = 0;
2048 enum pipe pipe;
2049 u32 aux_mask = GEN8_AUX_CHANNEL_A;
2050
2051 if (!intel_irqs_enabled(dev_priv))
2052 return IRQ_NONE;
2053
2054 if (IS_GEN9(dev))
2055 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2056 GEN9_AUX_CHANNEL_D;
2057
2058 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2059 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2060 if (!master_ctl)
2061 return IRQ_NONE;
2062
2063 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2064
2065 /* Find, clear, then process each source of interrupt */
2066
2067 ret = gen8_gt_irq_handler(dev_priv, master_ctl);
2068
2069 if (master_ctl & GEN8_DE_MISC_IRQ) {
2070 tmp = I915_READ(GEN8_DE_MISC_IIR);
2071 if (tmp) {
2072 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2073 ret = IRQ_HANDLED;
2074 if (tmp & GEN8_DE_MISC_GSE)
2075 intel_opregion_asle_intr(dev);
2076 else
2077 DRM_ERROR("Unexpected DE Misc interrupt\n");
2078 } else
2080 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2081 }
2082
2083 if (master_ctl & GEN8_DE_PORT_IRQ) {
2084 tmp = I915_READ(GEN8_DE_PORT_IIR);
2085 if (tmp) {
2086 bool found = false;
2087
2088 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2089 ret = IRQ_HANDLED;
2090
2091 if (tmp & aux_mask) {
2092 dp_aux_irq_handler(dev);
2093 found = true;
2094 }
2095
2096 if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) {
2097 bxt_hpd_handler(dev, tmp);
2098 found = true;
2099 }
2100
2101 if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2102 gmbus_irq_handler(dev);
2103 found = true;
2104 }
2105
2106 if (!found)
2107 DRM_ERROR("Unexpected DE Port interrupt\n");
2108 } else
2110 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2111 }
2112
2113 for_each_pipe(dev_priv, pipe) {
2114 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2115
2116 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2117 continue;
2118
2119 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2120 if (pipe_iir) {
2121 ret = IRQ_HANDLED;
2122 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2123
2124 if (pipe_iir & GEN8_PIPE_VBLANK &&
2125 intel_pipe_handle_vblank(dev, pipe))
2126 intel_check_page_flip(dev, pipe);
2127
2128 if (IS_GEN9(dev))
2129 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2130 else
2131 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2132
2133 if (flip_done) {
2134 intel_prepare_page_flip(dev, pipe);
2135 intel_finish_page_flip_plane(dev, pipe);
2136 }
2137
2138 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2139 hsw_pipe_crc_irq_handler(dev, pipe);
2140
2141 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2142 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2143 pipe);
2144
2146 if (IS_GEN9(dev))
2147 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2148 else
2149 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2150
2151 if (fault_errors)
2152 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2153 pipe_name(pipe),
2154 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2155 } else
2156 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2157 }
2158
2159 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2160 master_ctl & GEN8_DE_PCH_IRQ) {
2161 /*
2162 * FIXME(BDW): Assume for now that the new interrupt handling
2163 * scheme also closed the SDE interrupt handling race we've seen
2164 * on older pch-split platforms. But this needs testing.
2165 */
2166 u32 pch_iir = I915_READ(SDEIIR);
2167 if (pch_iir) {
2168 I915_WRITE(SDEIIR, pch_iir);
2169 ret = IRQ_HANDLED;
2170 cpt_irq_handler(dev, pch_iir);
2171 } else
2172 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2173
2174 }
2175
2176 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2177 POSTING_READ_FW(GEN8_MASTER_IRQ);
2178
2179 return ret;
2180 }
2181
2182 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2183 bool reset_completed)
2184 {
2185 struct intel_engine_cs *ring;
2186 int i;
2187
2188 /*
2189 * Notify all waiters for GPU completion events that reset state has
2190 * been changed, and that they need to restart their wait after
2191 * checking for potential errors (and bail out to drop locks if there is
2192 * a gpu reset pending so that i915_reset_and_wakeup can acquire them).
2193 */
2194
2195 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2196 for_each_ring(ring, dev_priv, i)
2197 wake_up_all(&ring->irq_queue);
2198
2199 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2200 wake_up_all(&dev_priv->pending_flip_queue);
2201
2202 /*
2203 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2204 * reset state is cleared.
2205 */
2206 if (reset_completed)
2207 wake_up_all(&dev_priv->gpu_error.reset_queue);
2208 }
2209
2210 /**
2211 * i915_reset_and_wakeup - do process context error handling work
2212 *
2213 * Fire an error uevent so userspace can see that a hang or error
2214 * was detected.
2215 */
2216 static void i915_reset_and_wakeup(struct drm_device *dev)
2217 {
2218 struct drm_i915_private *dev_priv = to_i915(dev);
2219 struct i915_gpu_error *error = &dev_priv->gpu_error;
2220 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2221 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2222 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2223 int ret;
2224
2225 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2226
2227 /*
2228 * Note that there's only one work item which does gpu resets, so we
2229 * need not worry about concurrent gpu resets potentially incrementing
2230 * error->reset_counter twice. We only need to take care of another
2231 * racing irq/hangcheck declaring the gpu dead for a second time. A
2232 * quick check for that is good enough: schedule_work ensures the
2233 * correct ordering between hang detection and this work item, and since
2234 * the reset in-progress bit is only ever set by code outside of this
2235 * work we don't need to worry about any other races.
2236 */
2237 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2238 DRM_DEBUG_DRIVER("resetting chip\n");
2239 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2240 reset_event);
2241
2242 /*
2243 * In most cases it's guaranteed that we get here with an RPM
2244 * reference held, for example because there is a pending GPU
2245 * request that won't finish until the reset is done. This
2246 * isn't the case at least when we get here by doing a
2247 * simulated reset via debugfs, so get an RPM reference.
2248 */
2249 intel_runtime_pm_get(dev_priv);
2250
2251 intel_prepare_reset(dev);
2252
2253 /*
2254 * All state reset _must_ be completed before we update the
2255 * reset counter, for otherwise waiters might miss the reset
2256 * pending state and not properly drop locks, resulting in
2257 * deadlocks with the reset work.
2258 */
2259 ret = i915_reset(dev);
2260
2261 intel_finish_reset(dev);
2262
2263 intel_runtime_pm_put(dev_priv);
2264
2265 if (ret == 0) {
2266 /*
2267 * After all the gem state is reset, increment the reset
2268 * counter and wake up everyone waiting for the reset to
2269 * complete.
2270 *
2271 * Since unlock operations are a one-sided barrier only,
2272 * we need to insert a barrier here to order any seqno
2273 * updates before the counter increment.
2275 */
2276 smp_mb__before_atomic();
2277 atomic_inc(&dev_priv->gpu_error.reset_counter);
2278
2279 kobject_uevent_env(&dev->primary->kdev->kobj,
2280 KOBJ_CHANGE, reset_done_event);
2281 } else {
2282 atomic_or(I915_WEDGED, &error->reset_counter);
2283 }
2284
2285 /*
2286 * Note: The wake_up also serves as a memory barrier so that
2287 * waiters see the updated value of the reset counter atomic_t.
2288 */
2289 i915_error_wake_up(dev_priv, true);
2290 }
2291 }
2292
2293 static void i915_report_and_clear_eir(struct drm_device *dev)
2294 {
2295 struct drm_i915_private *dev_priv = dev->dev_private;
2296 uint32_t instdone[I915_NUM_INSTDONE_REG];
2297 u32 eir = I915_READ(EIR);
2298 int pipe, i;
2299
2300 if (!eir)
2301 return;
2302
2303 pr_err("render error detected, EIR: 0x%08x\n", eir);
2304
2305 i915_get_extra_instdone(dev, instdone);
2306
2307 if (IS_G4X(dev)) {
2308 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2309 u32 ipeir = I915_READ(IPEIR_I965);
2310
2311 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2312 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2313 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2314 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2315 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2316 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2317 I915_WRITE(IPEIR_I965, ipeir);
2318 POSTING_READ(IPEIR_I965);
2319 }
2320 if (eir & GM45_ERROR_PAGE_TABLE) {
2321 u32 pgtbl_err = I915_READ(PGTBL_ER);
2322 pr_err("page table error\n");
2323 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2324 I915_WRITE(PGTBL_ER, pgtbl_err);
2325 POSTING_READ(PGTBL_ER);
2326 }
2327 }
2328
2329 if (!IS_GEN2(dev)) {
2330 if (eir & I915_ERROR_PAGE_TABLE) {
2331 u32 pgtbl_err = I915_READ(PGTBL_ER);
2332 pr_err("page table error\n");
2333 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2334 I915_WRITE(PGTBL_ER, pgtbl_err);
2335 POSTING_READ(PGTBL_ER);
2336 }
2337 }
2338
2339 if (eir & I915_ERROR_MEMORY_REFRESH) {
2340 pr_err("memory refresh error:\n");
2341 for_each_pipe(dev_priv, pipe)
2342 pr_err("pipe %c stat: 0x%08x\n",
2343 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2344 /* pipestat has already been acked */
2345 }
2346 if (eir & I915_ERROR_INSTRUCTION) {
2347 pr_err("instruction error\n");
2348 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2349 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2350 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2351 if (INTEL_INFO(dev)->gen < 4) {
2352 u32 ipeir = I915_READ(IPEIR);
2353
2354 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2355 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2356 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2357 I915_WRITE(IPEIR, ipeir);
2358 POSTING_READ(IPEIR);
2359 } else {
2360 u32 ipeir = I915_READ(IPEIR_I965);
2361
2362 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2363 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2364 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2365 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2366 I915_WRITE(IPEIR_I965, ipeir);
2367 POSTING_READ(IPEIR_I965);
2368 }
2369 }
2370
2371 I915_WRITE(EIR, eir);
2372 POSTING_READ(EIR);
2373 eir = I915_READ(EIR);
2374 if (eir) {
2375 /*
2376 * some errors might have become stuck,
2377 * mask them.
2378 */
2379 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2380 I915_WRITE(EMR, I915_READ(EMR) | eir);
2381 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2382 }
2383 }
2384
2385 /**
2386 * i915_handle_error - handle a gpu error
2387 * @dev: drm device
2388 *
2389 * Do some basic checking of register state at error time and
2390 * dump it to the syslog. Also call i915_capture_error_state() to make
2391 * sure we get a record and make it available in debugfs. Fire a uevent
2392 * so userspace knows something bad happened (should trigger collection
2393 * of a ring dump etc.).
2394 */
2395 void i915_handle_error(struct drm_device *dev, bool wedged,
2396 const char *fmt, ...)
2397 {
2398 struct drm_i915_private *dev_priv = dev->dev_private;
2399 va_list args;
2400 char error_msg[80];
2401
2402 va_start(args, fmt);
2403 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2404 va_end(args);
2405
2406 i915_capture_error_state(dev, wedged, error_msg);
2407 i915_report_and_clear_eir(dev);
2408
2409 if (wedged) {
2410 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2411 &dev_priv->gpu_error.reset_counter);
2412
2413 /*
2414 * Wakeup waiting processes so that the reset function
2415 * i915_reset_and_wakeup doesn't deadlock trying to grab
2416 * various locks. By bumping the reset counter first, the woken
2417 * processes will see a reset in progress and back off,
2418 * releasing their locks and then wait for the reset completion.
2419 * We must do this for _all_ gpu waiters that might hold locks
2420 * that the reset work needs to acquire.
2421 *
2422 * Note: The wake_up serves as the required memory barrier to
2423 * ensure that the waiters see the updated value of the reset
2424 * counter atomic_t.
2425 */
2426 i915_error_wake_up(dev_priv, false);
2427 }
2428
2429 i915_reset_and_wakeup(dev);
2430 }
2431
2432 /* Called from drm generic code, passed 'crtc' which
2433 * we use as a pipe index
2434 */
2435 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2436 {
2437 struct drm_i915_private *dev_priv = dev->dev_private;
2438 unsigned long irqflags;
2439
2440 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2441 if (INTEL_INFO(dev)->gen >= 4)
2442 i915_enable_pipestat(dev_priv, pipe,
2443 PIPE_START_VBLANK_INTERRUPT_STATUS);
2444 else
2445 i915_enable_pipestat(dev_priv, pipe,
2446 PIPE_VBLANK_INTERRUPT_STATUS);
2447 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2448
2449 return 0;
2450 }
2451
2452 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2453 {
2454 struct drm_i915_private *dev_priv = dev->dev_private;
2455 unsigned long irqflags;
2456 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2457 DE_PIPE_VBLANK(pipe);
2458
2459 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2460 ironlake_enable_display_irq(dev_priv, bit);
2461 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2462
2463 return 0;
2464 }
2465
2466 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2467 {
2468 struct drm_i915_private *dev_priv = dev->dev_private;
2469 unsigned long irqflags;
2470
2471 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2472 i915_enable_pipestat(dev_priv, pipe,
2473 PIPE_START_VBLANK_INTERRUPT_STATUS);
2474 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2475
2476 return 0;
2477 }
2478
2479 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2480 {
2481 struct drm_i915_private *dev_priv = dev->dev_private;
2482 unsigned long irqflags;
2483
2484 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2485 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2486 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2487 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2488 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2489 return 0;
2490 }
2491
2492 /* Called from drm generic code, passed 'crtc' which
2493 * we use as a pipe index
2494 */
2495 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2496 {
2497 struct drm_i915_private *dev_priv = dev->dev_private;
2498 unsigned long irqflags;
2499
2500 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2501 i915_disable_pipestat(dev_priv, pipe,
2502 PIPE_VBLANK_INTERRUPT_STATUS |
2503 PIPE_START_VBLANK_INTERRUPT_STATUS);
2504 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2505 }
2506
2507 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2508 {
2509 struct drm_i915_private *dev_priv = dev->dev_private;
2510 unsigned long irqflags;
2511 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2512 DE_PIPE_VBLANK(pipe);
2513
2514 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2515 ironlake_disable_display_irq(dev_priv, bit);
2516 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2517 }
2518
2519 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2520 {
2521 struct drm_i915_private *dev_priv = dev->dev_private;
2522 unsigned long irqflags;
2523
2524 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2525 i915_disable_pipestat(dev_priv, pipe,
2526 PIPE_START_VBLANK_INTERRUPT_STATUS);
2527 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2528 }
2529
2530 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2531 {
2532 struct drm_i915_private *dev_priv = dev->dev_private;
2533 unsigned long irqflags;
2534
2535 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2536 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2537 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2538 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2539 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2540 }
2541
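/*
 * A ring is considered idle once the last submitted seqno has passed, or the
 * request list is empty.
 */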
2542 static bool
2543 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2544 {
2545 return (list_empty(&ring->request_list) ||
2546 i915_seqno_passed(seqno, ring->last_submitted_seqno));
2547 }
2548
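/* Does IPEHR hold a semaphore wait command for this platform? */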
2549 static bool
2550 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2551 {
2552 if (INTEL_INFO(dev)->gen >= 8) {
2553 return (ipehr >> 23) == 0x1c;
2554 } else {
2555 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2556 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2557 MI_SEMAPHORE_REGISTER);
2558 }
2559 }
2560
2561 static struct intel_engine_cs *
2562 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2563 {
2564 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2565 struct intel_engine_cs *signaller;
2566 int i;
2567
2568 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2569 for_each_ring(signaller, dev_priv, i) {
2570 if (ring == signaller)
2571 continue;
2572
2573 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2574 return signaller;
2575 }
2576 } else {
2577 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2578
2579 for_each_ring(signaller, dev_priv, i) {
2580 if (ring == signaller)
2581 continue;
2582
2583 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2584 return signaller;
2585 }
2586 }
2587
2588 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2589 ring->id, ipehr, offset);
2590
2591 return NULL;
2592 }
2593
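/*
 * If the ring is stuck on a semaphore wait, scan the ringbuffer backwards
 * from HEAD to find the wait command, extract the awaited seqno and return
 * the ring that is expected to signal it.
 */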
2594 static struct intel_engine_cs *
2595 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2596 {
2597 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2598 u32 cmd, ipehr, head;
2599 u64 offset = 0;
2600 int i, backwards;
2601
2602 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2603 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2604 return NULL;
2605
2606 /*
2607 * HEAD is likely pointing to the dword after the actual command,
2608 * so scan backwards until we find the MBOX. But limit it to just 3
2609 * or 4 dwords depending on the semaphore wait command size.
2610 * Note that we don't care about ACTHD here since that might
2611 * point at a batch, and semaphores are always emitted into the
2612 * ringbuffer itself.
2613 */
2614 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2615 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2616
2617 for (i = backwards; i; --i) {
2618 /*
2619 * Be paranoid and presume the hw has gone off into the wild -
2620 * our ring is smaller than what the hardware (and hence
2621 * HEAD_ADDR) allows. Also handles wrap-around.
2622 */
2623 head &= ring->buffer->size - 1;
2624
2625 /* This here seems to blow up */
2626 cmd = ioread32(ring->buffer->virtual_start + head);
2627 if (cmd == ipehr)
2628 break;
2629
2630 head -= 4;
2631 }
2632
2633 if (!i)
2634 return NULL;
2635
2636 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2637 if (INTEL_INFO(ring->dev)->gen >= 8) {
2638 offset = ioread32(ring->buffer->virtual_start + head + 12);
2639 offset <<= 32;
2640 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2641 }
2642 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2643 }
2644
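/*
 * Returns 1 if the seqno this ring is waiting on has already been signalled,
 * 0 if the wait is still pending and -1 if no signaller could be determined
 * (or a semaphore deadlock is suspected).
 */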
2645 static int semaphore_passed(struct intel_engine_cs *ring)
2646 {
2647 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2648 struct intel_engine_cs *signaller;
2649 u32 seqno;
2650
2651 ring->hangcheck.deadlock++;
2652
2653 signaller = semaphore_waits_for(ring, &seqno);
2654 if (signaller == NULL)
2655 return -1;
2656
2657 /* Prevent pathological recursion due to driver bugs */
2658 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2659 return -1;
2660
2661 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2662 return 1;
2663
2664 /* cursory check for an unkickable deadlock */
2665 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2666 semaphore_passed(signaller) < 0)
2667 return -1;
2668
2669 return 0;
2670 }
2671
2672 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2673 {
2674 struct intel_engine_cs *ring;
2675 int i;
2676
2677 for_each_ring(ring, dev_priv, i)
2678 ring->hangcheck.deadlock = 0;
2679 }
2680
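/*
 * Classify a ring whose seqno has stopped advancing: still active (ACTHD is
 * moving), waiting, kickable (stuck on WAIT_FOR_EVENT or a passed semaphore)
 * or genuinely hung.
 */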
2681 static enum intel_ring_hangcheck_action
2682 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2683 {
2684 struct drm_device *dev = ring->dev;
2685 struct drm_i915_private *dev_priv = dev->dev_private;
2686 u32 tmp;
2687
2688 if (acthd != ring->hangcheck.acthd) {
2689 if (acthd > ring->hangcheck.max_acthd) {
2690 ring->hangcheck.max_acthd = acthd;
2691 return HANGCHECK_ACTIVE;
2692 }
2693
2694 return HANGCHECK_ACTIVE_LOOP;
2695 }
2696
2697 if (IS_GEN2(dev))
2698 return HANGCHECK_HUNG;
2699
2700 /* Is the chip hanging on a WAIT_FOR_EVENT?
2701 * If so we can simply poke the RB_WAIT bit
2702 * and break the hang. This should work on
2703 * all but the second generation chipsets.
2704 */
2705 tmp = I915_READ_CTL(ring);
2706 if (tmp & RING_WAIT) {
2707 i915_handle_error(dev, false,
2708 "Kicking stuck wait on %s",
2709 ring->name);
2710 I915_WRITE_CTL(ring, tmp);
2711 return HANGCHECK_KICK;
2712 }
2713
2714 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2715 switch (semaphore_passed(ring)) {
2716 default:
2717 return HANGCHECK_HUNG;
2718 case 1:
2719 i915_handle_error(dev, false,
2720 "Kicking stuck semaphore on %s",
2721 ring->name);
2722 I915_WRITE_CTL(ring, tmp);
2723 return HANGCHECK_KICK;
2724 case 0:
2725 return HANGCHECK_WAIT;
2726 }
2727 }
2728
2729 return HANGCHECK_HUNG;
2730 }
2731
2732 /*
2733 * This is called when the chip hasn't reported back with completed
2734 * batchbuffers in a long time. We keep track of per-ring seqno progress and
2735 * if there is no progress, the hangcheck score for that ring is increased.
2736 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
2737 * we kick the ring. If we see no progress on three subsequent calls
2738 * we assume the chip is wedged and try to fix it by resetting the chip.
2739 */
2740 static void i915_hangcheck_elapsed(struct work_struct *work)
2741 {
2742 struct drm_i915_private *dev_priv =
2743 container_of(work, typeof(*dev_priv),
2744 gpu_error.hangcheck_work.work);
2745 struct drm_device *dev = dev_priv->dev;
2746 struct intel_engine_cs *ring;
2747 int i;
2748 int busy_count = 0, rings_hung = 0;
2749 bool stuck[I915_NUM_RINGS] = { 0 };
2750 #define BUSY 1
2751 #define KICK 5
2752 #define HUNG 20
2753
2754 if (!i915.enable_hangcheck)
2755 return;
2756
2757 for_each_ring(ring, dev_priv, i) {
2758 u64 acthd;
2759 u32 seqno;
2760 bool busy = true;
2761
2762 semaphore_clear_deadlocks(dev_priv);
2763
2764 seqno = ring->get_seqno(ring, false);
2765 acthd = intel_ring_get_active_head(ring);
2766
2767 if (ring->hangcheck.seqno == seqno) {
2768 if (ring_idle(ring, seqno)) {
2769 ring->hangcheck.action = HANGCHECK_IDLE;
2770
2771 if (waitqueue_active(&ring->irq_queue)) {
2772 /* Issue a wake-up to catch stuck h/w. */
2773 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2774 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2775 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2776 ring->name);
2777 else
2778 DRM_INFO("Fake missed irq on %s\n",
2779 ring->name);
2780 wake_up_all(&ring->irq_queue);
2781 }
2782 /* Safeguard against driver failure */
2783 ring->hangcheck.score += BUSY;
2784 } else
2785 busy = false;
2786 } else {
2787 /* We always increment the hangcheck score
2788 * if the ring is busy and still processing
2789 * the same request, so that no single request
2790 * can run indefinitely (such as a chain of
2791 * batches). The only time we do not increment
2792 * the hangcheck score on this ring is if this
2793 * ring is in a legitimate wait for another
2794 * ring. In that case the waiting ring is a
2795 * victim and we want to be sure we catch the
2796 * right culprit. Then every time we do kick
2797 * the ring, add a small increment to the
2798 * score so that we can catch a batch that is
2799 * being repeatedly kicked and so responsible
2800 * for stalling the machine.
2801 */
2802 ring->hangcheck.action = ring_stuck(ring,
2803 acthd);
2804
2805 switch (ring->hangcheck.action) {
2806 case HANGCHECK_IDLE:
2807 case HANGCHECK_WAIT:
2808 case HANGCHECK_ACTIVE:
2809 break;
2810 case HANGCHECK_ACTIVE_LOOP:
2811 ring->hangcheck.score += BUSY;
2812 break;
2813 case HANGCHECK_KICK:
2814 ring->hangcheck.score += KICK;
2815 break;
2816 case HANGCHECK_HUNG:
2817 ring->hangcheck.score += HUNG;
2818 stuck[i] = true;
2819 break;
2820 }
2821 }
2822 } else {
2823 ring->hangcheck.action = HANGCHECK_ACTIVE;
2824
2825 /* Gradually reduce the count so that we catch DoS
2826 * attempts across multiple batches.
2827 */
2828 if (ring->hangcheck.score > 0)
2829 ring->hangcheck.score--;
2830
2831 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
2832 }
2833
2834 ring->hangcheck.seqno = seqno;
2835 ring->hangcheck.acthd = acthd;
2836 busy_count += busy;
2837 }
2838
2839 for_each_ring(ring, dev_priv, i) {
2840 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
2841 DRM_INFO("%s on %s\n",
2842 stuck[i] ? "stuck" : "no progress",
2843 ring->name);
2844 rings_hung++;
2845 }
2846 }
2847
2848 if (rings_hung)
2849 return i915_handle_error(dev, true, "Ring hung");
2850
2851 if (busy_count)
2852 /* Reset timer in case the chip hangs without another request
2853 * being added */
2854 i915_queue_hangcheck(dev);
2855 }
2856
2857 void i915_queue_hangcheck(struct drm_device *dev)
2858 {
2859 struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
2860
2861 if (!i915.enable_hangcheck)
2862 return;
2863
2864 /* Don't continually defer the hangcheck so that it is always run at
2865 * least once after work has been scheduled on any ring. Otherwise,
2866 * we will ignore a hung ring if a second ring is kept busy.
2867 */
2868
2869 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
2870 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
2871 }
2872
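/* Reset the south display (PCH) interrupt registers, plus SERR_INT on CPT/LPT. */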
2873 static void ibx_irq_reset(struct drm_device *dev)
2874 {
2875 struct drm_i915_private *dev_priv = dev->dev_private;
2876
2877 if (HAS_PCH_NOP(dev))
2878 return;
2879
2880 GEN5_IRQ_RESET(SDE);
2881
2882 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2883 I915_WRITE(SERR_INT, 0xffffffff);
2884 }
2885
2886 /*
2887 * SDEIER is also touched by the interrupt handler to work around missed PCH
2888 * interrupts. Hence we can't update it after the interrupt handler is enabled -
2889 * instead we unconditionally enable all PCH interrupt sources here, but then
2890 * only unmask them as needed with SDEIMR.
2891 *
2892 * This function needs to be called before interrupts are enabled.
2893 */
2894 static void ibx_irq_pre_postinstall(struct drm_device *dev)
2895 {
2896 struct drm_i915_private *dev_priv = dev->dev_private;
2897
2898 if (HAS_PCH_NOP(dev))
2899 return;
2900
2901 WARN_ON(I915_READ(SDEIER) != 0);
2902 I915_WRITE(SDEIER, 0xffffffff);
2903 POSTING_READ(SDEIER);
2904 }
2905
2906 static void gen5_gt_irq_reset(struct drm_device *dev)
2907 {
2908 struct drm_i915_private *dev_priv = dev->dev_private;
2909
2910 GEN5_IRQ_RESET(GT);
2911 if (INTEL_INFO(dev)->gen >= 6)
2912 GEN5_IRQ_RESET(GEN6_PM);
2913 }
2914
2915 /* drm_dma.h hooks */
2917 static void ironlake_irq_reset(struct drm_device *dev)
2918 {
2919 struct drm_i915_private *dev_priv = dev->dev_private;
2920
2921 I915_WRITE(HWSTAM, 0xffffffff);
2922
2923 GEN5_IRQ_RESET(DE);
2924 if (IS_GEN7(dev))
2925 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
2926
2927 gen5_gt_irq_reset(dev);
2928
2929 ibx_irq_reset(dev);
2930 }
2931
2932 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2933 {
2934 enum pipe pipe;
2935
2936 I915_WRITE(PORT_HOTPLUG_EN, 0);
2937 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2938
2939 for_each_pipe(dev_priv, pipe)
2940 I915_WRITE(PIPESTAT(pipe), 0xffff);
2941
2942 GEN5_IRQ_RESET(VLV_);
2943 }
2944
2945 static void valleyview_irq_preinstall(struct drm_device *dev)
2946 {
2947 struct drm_i915_private *dev_priv = dev->dev_private;
2948
2949 /* VLV magic */
2950 I915_WRITE(VLV_IMR, 0);
2951 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2952 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2953 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2954
2955 gen5_gt_irq_reset(dev);
2956
2957 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2958
2959 vlv_display_irq_reset(dev_priv);
2960 }
2961
2962 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
2963 {
2964 GEN8_IRQ_RESET_NDX(GT, 0);
2965 GEN8_IRQ_RESET_NDX(GT, 1);
2966 GEN8_IRQ_RESET_NDX(GT, 2);
2967 GEN8_IRQ_RESET_NDX(GT, 3);
2968 }
2969
2970 static void gen8_irq_reset(struct drm_device *dev)
2971 {
2972 struct drm_i915_private *dev_priv = dev->dev_private;
2973 int pipe;
2974
2975 I915_WRITE(GEN8_MASTER_IRQ, 0);
2976 POSTING_READ(GEN8_MASTER_IRQ);
2977
2978 gen8_gt_irq_reset(dev_priv);
2979
2980 for_each_pipe(dev_priv, pipe)
2981 if (intel_display_power_is_enabled(dev_priv,
2982 POWER_DOMAIN_PIPE(pipe)))
2983 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
2984
2985 GEN5_IRQ_RESET(GEN8_DE_PORT_);
2986 GEN5_IRQ_RESET(GEN8_DE_MISC_);
2987 GEN5_IRQ_RESET(GEN8_PCU_);
2988
2989 if (HAS_PCH_SPLIT(dev))
2990 ibx_irq_reset(dev);
2991 }
2992
2993 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
2994 unsigned int pipe_mask)
2995 {
2996 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
2997
2998 spin_lock_irq(&dev_priv->irq_lock);
2999 if (pipe_mask & 1 << PIPE_A)
3000 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3001 dev_priv->de_irq_mask[PIPE_A],
3002 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3003 if (pipe_mask & 1 << PIPE_B)
3004 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3005 dev_priv->de_irq_mask[PIPE_B],
3006 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3007 if (pipe_mask & 1 << PIPE_C)
3008 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3009 dev_priv->de_irq_mask[PIPE_C],
3010 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3011 spin_unlock_irq(&dev_priv->irq_lock);
3012 }
3013
3014 static void cherryview_irq_preinstall(struct drm_device *dev)
3015 {
3016 struct drm_i915_private *dev_priv = dev->dev_private;
3017
3018 I915_WRITE(GEN8_MASTER_IRQ, 0);
3019 POSTING_READ(GEN8_MASTER_IRQ);
3020
3021 gen8_gt_irq_reset(dev_priv);
3022
3023 GEN5_IRQ_RESET(GEN8_PCU_);
3024
3025 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3026
3027 vlv_display_irq_reset(dev_priv);
3028 }
3029
3030 static void ibx_hpd_irq_setup(struct drm_device *dev)
3031 {
3032 struct drm_i915_private *dev_priv = dev->dev_private;
3033 struct intel_encoder *intel_encoder;
3034 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3035
3036 if (HAS_PCH_IBX(dev)) {
3037 hotplug_irqs = SDE_HOTPLUG_MASK;
3038 for_each_intel_encoder(dev, intel_encoder)
3039 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3040 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3041 } else if (HAS_PCH_SPT(dev)) {
3042 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3043 for_each_intel_encoder(dev, intel_encoder)
3044 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3045 enabled_irqs |= hpd_spt[intel_encoder->hpd_pin];
3046 } else {
3047 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3048 for_each_intel_encoder(dev, intel_encoder)
3049 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3050 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3051 }
3052
3053 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3054
3055 /*
3056 * Enable digital hotplug on the PCH, and configure the DP short pulse
3057 * duration to 2ms (which is the minimum in the Display Port spec)
3058 *
3059 * This register is the same on all known PCH chips.
3060 */
3061 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3062 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3063 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3064 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3065 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3066 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3067
3068 /* enable SPT PORTE hot plug */
3069 if (HAS_PCH_SPT(dev)) {
3070 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3071 hotplug |= PORTE_HOTPLUG_ENABLE;
3072 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3073 }
3074 }
3075
3076 static void bxt_hpd_irq_setup(struct drm_device *dev)
3077 {
3078 struct drm_i915_private *dev_priv = dev->dev_private;
3079 struct intel_encoder *intel_encoder;
3080 u32 hotplug_port = 0;
3081 u32 hotplug_ctrl;
3082
3083 /* Now, enable HPD */
3084 for_each_intel_encoder(dev, intel_encoder) {
3085 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state
3086 == HPD_ENABLED)
3087 hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
3088 }
3089
3090 /* Mask all HPD control bits */
3091 hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK;
3092
3093 /* Enable requested port in hotplug control */
3094 /* TODO: implement (short) HPD support on port A */
3095 WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA);
3096 if (hotplug_port & BXT_DE_PORT_HP_DDIB)
3097 hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
3098 if (hotplug_port & BXT_DE_PORT_HP_DDIC)
3099 hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
3100 I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);
3101
3102 /* Unmask DDI hotplug in IMR */
3103 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
3104 I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);
3105
3106 /* Enable DDI hotplug in IER */
3107 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port;
3108 I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl);
3109 POSTING_READ(GEN8_DE_PORT_IER);
3110 }
3111
3112 static void ibx_irq_postinstall(struct drm_device *dev)
3113 {
3114 struct drm_i915_private *dev_priv = dev->dev_private;
3115 u32 mask;
3116
3117 if (HAS_PCH_NOP(dev))
3118 return;
3119
3120 if (HAS_PCH_IBX(dev))
3121 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3122 else
3123 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3124
3125 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3126 I915_WRITE(SDEIMR, ~mask);
3127 }
3128
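/*
 * Unmask the GT interrupts that are always wanted (user interrupts, and L3
 * parity on platforms with L3 DPF); PM/RPS interrupts stay masked until RPS
 * itself is enabled.
 */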
3129 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3130 {
3131 struct drm_i915_private *dev_priv = dev->dev_private;
3132 u32 pm_irqs, gt_irqs;
3133
3134 pm_irqs = gt_irqs = 0;
3135
3136 dev_priv->gt_irq_mask = ~0;
3137 if (HAS_L3_DPF(dev)) {
3138 /* L3 parity interrupt is always unmasked. */
3139 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3140 gt_irqs |= GT_PARITY_ERROR(dev);
3141 }
3142
3143 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3144 if (IS_GEN5(dev)) {
3145 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3146 ILK_BSD_USER_INTERRUPT;
3147 } else {
3148 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3149 }
3150
3151 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3152
3153 if (INTEL_INFO(dev)->gen >= 6) {
3154 /*
3155 * RPS interrupts will get enabled/disabled on demand when RPS
3156 * itself is enabled/disabled.
3157 */
3158 if (HAS_VEBOX(dev))
3159 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3160
3161 dev_priv->pm_irq_mask = 0xffffffff;
3162 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3163 }
3164 }
3165
3166 static int ironlake_irq_postinstall(struct drm_device *dev)
3167 {
3168 struct drm_i915_private *dev_priv = dev->dev_private;
3169 u32 display_mask, extra_mask;
3170
3171 if (INTEL_INFO(dev)->gen >= 7) {
3172 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3173 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3174 DE_PLANEB_FLIP_DONE_IVB |
3175 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3176 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3177 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3178 } else {
3179 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3180 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3181 DE_AUX_CHANNEL_A |
3182 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3183 DE_POISON);
3184 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3185 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3186 }
3187
3188 dev_priv->irq_mask = ~display_mask;
3189
3190 I915_WRITE(HWSTAM, 0xeffe);
3191
3192 ibx_irq_pre_postinstall(dev);
3193
3194 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3195
3196 gen5_gt_irq_postinstall(dev);
3197
3198 ibx_irq_postinstall(dev);
3199
3200 if (IS_IRONLAKE_M(dev)) {
3201 /* Enable PCU event interrupts
3202 *
3203 * spinlocking not required here for correctness since interrupt
3204 * setup is guaranteed to run in single-threaded context. But we
3205 * need it to make the assert_spin_locked happy. */
3206 spin_lock_irq(&dev_priv->irq_lock);
3207 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3208 spin_unlock_irq(&dev_priv->irq_lock);
3209 }
3210
3211 return 0;
3212 }
3213
3214 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3215 {
3216 u32 pipestat_mask;
3217 u32 iir_mask;
3218 enum pipe pipe;
3219
3220 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3221 PIPE_FIFO_UNDERRUN_STATUS;
3222
3223 for_each_pipe(dev_priv, pipe)
3224 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3225 POSTING_READ(PIPESTAT(PIPE_A));
3226
3227 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3228 PIPE_CRC_DONE_INTERRUPT_STATUS;
3229
3230 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3231 for_each_pipe(dev_priv, pipe)
3232 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3233
3234 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3235 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3236 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3237 if (IS_CHERRYVIEW(dev_priv))
3238 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3239 dev_priv->irq_mask &= ~iir_mask;
3240
3241 I915_WRITE(VLV_IIR, iir_mask);
3242 I915_WRITE(VLV_IIR, iir_mask);
3243 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3244 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3245 POSTING_READ(VLV_IMR);
3246 }
3247
3248 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3249 {
3250 u32 pipestat_mask;
3251 u32 iir_mask;
3252 enum pipe pipe;
3253
3254 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3255 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3256 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3257 if (IS_CHERRYVIEW(dev_priv))
3258 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3259
3260 dev_priv->irq_mask |= iir_mask;
3261 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3262 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3263 I915_WRITE(VLV_IIR, iir_mask);
3264 I915_WRITE(VLV_IIR, iir_mask);
3265 POSTING_READ(VLV_IIR);
3266
3267 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3268 PIPE_CRC_DONE_INTERRUPT_STATUS;
3269
3270 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3271 for_each_pipe(dev_priv, pipe)
3272 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3273
3274 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3275 PIPE_FIFO_UNDERRUN_STATUS;
3276
3277 for_each_pipe(dev_priv, pipe)
3278 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3279 POSTING_READ(PIPESTAT(PIPE_A));
3280 }
3281
3282 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3283 {
3284 assert_spin_locked(&dev_priv->irq_lock);
3285
3286 if (dev_priv->display_irqs_enabled)
3287 return;
3288
3289 dev_priv->display_irqs_enabled = true;
3290
3291 if (intel_irqs_enabled(dev_priv))
3292 valleyview_display_irqs_install(dev_priv);
3293 }
3294
3295 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3296 {
3297 assert_spin_locked(&dev_priv->irq_lock);
3298
3299 if (!dev_priv->display_irqs_enabled)
3300 return;
3301
3302 dev_priv->display_irqs_enabled = false;
3303
3304 if (intel_irqs_enabled(dev_priv))
3305 valleyview_display_irqs_uninstall(dev_priv);
3306 }
3307
3308 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3309 {
3310 dev_priv->irq_mask = ~0;
3311
3312 I915_WRITE(PORT_HOTPLUG_EN, 0);
3313 POSTING_READ(PORT_HOTPLUG_EN);
3314
3315 I915_WRITE(VLV_IIR, 0xffffffff);
3316 I915_WRITE(VLV_IIR, 0xffffffff);
3317 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3318 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3319 POSTING_READ(VLV_IMR);
3320
3321 /* Interrupt setup is already guaranteed to be single-threaded, this is
3322 * just to make the assert_spin_locked check happy. */
3323 spin_lock_irq(&dev_priv->irq_lock);
3324 if (dev_priv->display_irqs_enabled)
3325 valleyview_display_irqs_install(dev_priv);
3326 spin_unlock_irq(&dev_priv->irq_lock);
3327 }
3328
3329 static int valleyview_irq_postinstall(struct drm_device *dev)
3330 {
3331 struct drm_i915_private *dev_priv = dev->dev_private;
3332
3333 vlv_display_irq_postinstall(dev_priv);
3334
3335 gen5_gt_irq_postinstall(dev);
3336
3337 /* ack & enable invalid PTE error interrupts */
3338 #if 0 /* FIXME: add support to irq handler for checking these bits */
3339 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3340 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3341 #endif
3342
3343 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3344
3345 return 0;
3346 }
3347
3348 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3349 {
3350 /* These are interrupts we'll toggle with the ring mask register */
3351 uint32_t gt_interrupts[] = {
3352 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3353 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3354 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3355 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3356 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3357 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3358 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3359 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3360 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3361 0,
3362 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3363 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3364 };
3365
3366 dev_priv->pm_irq_mask = 0xffffffff;
3367 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3368 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3369 /*
3370 * RPS interrupts will get enabled/disabled on demand when RPS itself
3371 * is enabled/disabled.
3372 */
3373 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3374 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3375 }
3376
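/*
 * Program the gen8+ display engine interrupt masks: per-pipe flip done, CRC,
 * vblank and FIFO underrun bits, plus the AUX channels (and GMBUS on BXT) in
 * the DE port register.
 */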
3377 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3378 {
3379 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3380 uint32_t de_pipe_enables;
3381 int pipe;
3382 u32 de_port_en = GEN8_AUX_CHANNEL_A;
3383
3384 if (IS_GEN9(dev_priv)) {
3385 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3386 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3387 de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3388 GEN9_AUX_CHANNEL_D;
3389
3390 if (IS_BROXTON(dev_priv))
3391 de_port_en |= BXT_DE_PORT_GMBUS;
3392 } else
3393 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3394 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3395
3396 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3397 GEN8_PIPE_FIFO_UNDERRUN;
3398
3399 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3400 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3401 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3402
3403 for_each_pipe(dev_priv, pipe)
3404 if (intel_display_power_is_enabled(dev_priv,
3405 POWER_DOMAIN_PIPE(pipe)))
3406 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3407 dev_priv->de_irq_mask[pipe],
3408 de_pipe_enables);
3409
3410 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en);
3411 }
3412
3413 static int gen8_irq_postinstall(struct drm_device *dev)
3414 {
3415 struct drm_i915_private *dev_priv = dev->dev_private;
3416
3417 if (HAS_PCH_SPLIT(dev))
3418 ibx_irq_pre_postinstall(dev);
3419
3420 gen8_gt_irq_postinstall(dev_priv);
3421 gen8_de_irq_postinstall(dev_priv);
3422
3423 if (HAS_PCH_SPLIT(dev))
3424 ibx_irq_postinstall(dev);
3425
3426 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3427 POSTING_READ(GEN8_MASTER_IRQ);
3428
3429 return 0;
3430 }
3431
3432 static int cherryview_irq_postinstall(struct drm_device *dev)
3433 {
3434 struct drm_i915_private *dev_priv = dev->dev_private;
3435
3436 vlv_display_irq_postinstall(dev_priv);
3437
3438 gen8_gt_irq_postinstall(dev_priv);
3439
3440 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3441 POSTING_READ(GEN8_MASTER_IRQ);
3442
3443 return 0;
3444 }
3445
3446 static void gen8_irq_uninstall(struct drm_device *dev)
3447 {
3448 struct drm_i915_private *dev_priv = dev->dev_private;
3449
3450 if (!dev_priv)
3451 return;
3452
3453 gen8_irq_reset(dev);
3454 }
3455
3456 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3457 {
3458 /* Interrupt setup is already guaranteed to be single-threaded, this is
3459 * just to make the assert_spin_locked check happy. */
3460 spin_lock_irq(&dev_priv->irq_lock);
3461 if (dev_priv->display_irqs_enabled)
3462 valleyview_display_irqs_uninstall(dev_priv);
3463 spin_unlock_irq(&dev_priv->irq_lock);
3464
3465 vlv_display_irq_reset(dev_priv);
3466
3467 dev_priv->irq_mask = ~0;
3468 }
3469
3470 static void valleyview_irq_uninstall(struct drm_device *dev)
3471 {
3472 struct drm_i915_private *dev_priv = dev->dev_private;
3473
3474 if (!dev_priv)
3475 return;
3476
3477 I915_WRITE(VLV_MASTER_IER, 0);
3478
3479 gen5_gt_irq_reset(dev);
3480
3481 I915_WRITE(HWSTAM, 0xffffffff);
3482
3483 vlv_display_irq_uninstall(dev_priv);
3484 }
3485
3486 static void cherryview_irq_uninstall(struct drm_device *dev)
3487 {
3488 struct drm_i915_private *dev_priv = dev->dev_private;
3489
3490 if (!dev_priv)
3491 return;
3492
3493 I915_WRITE(GEN8_MASTER_IRQ, 0);
3494 POSTING_READ(GEN8_MASTER_IRQ);
3495
3496 gen8_gt_irq_reset(dev_priv);
3497
3498 GEN5_IRQ_RESET(GEN8_PCU_);
3499
3500 vlv_display_irq_uninstall(dev_priv);
3501 }
3502
3503 static void ironlake_irq_uninstall(struct drm_device *dev)
3504 {
3505 struct drm_i915_private *dev_priv = dev->dev_private;
3506
3507 if (!dev_priv)
3508 return;
3509
3510 ironlake_irq_reset(dev);
3511 }
3512
3513 static void i8xx_irq_preinstall(struct drm_device * dev)
3514 {
3515 struct drm_i915_private *dev_priv = dev->dev_private;
3516 int pipe;
3517
3518 for_each_pipe(dev_priv, pipe)
3519 I915_WRITE(PIPESTAT(pipe), 0);
3520 I915_WRITE16(IMR, 0xffff);
3521 I915_WRITE16(IER, 0x0);
3522 POSTING_READ16(IER);
3523 }
3524
3525 static int i8xx_irq_postinstall(struct drm_device *dev)
3526 {
3527 struct drm_i915_private *dev_priv = dev->dev_private;
3528
3529 I915_WRITE16(EMR,
3530 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3531
3532 /* Unmask the interrupts that we always want on. */
3533 dev_priv->irq_mask =
3534 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3535 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3536 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3537 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3538 I915_WRITE16(IMR, dev_priv->irq_mask);
3539
3540 I915_WRITE16(IER,
3541 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3542 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3543 I915_USER_INTERRUPT);
3544 POSTING_READ16(IER);
3545
3546 /* Interrupt setup is already guaranteed to be single-threaded, this is
3547 * just to make the assert_spin_locked check happy. */
3548 spin_lock_irq(&dev_priv->irq_lock);
3549 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3550 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3551 spin_unlock_irq(&dev_priv->irq_lock);
3552
3553 return 0;
3554 }
3555
3556 /*
3557 * Returns true when a page flip has completed.
3558 */
3559 static bool i8xx_handle_vblank(struct drm_device *dev,
3560 int plane, int pipe, u32 iir)
3561 {
3562 struct drm_i915_private *dev_priv = dev->dev_private;
3563 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3564
3565 if (!intel_pipe_handle_vblank(dev, pipe))
3566 return false;
3567
3568 if ((iir & flip_pending) == 0)
3569 goto check_page_flip;
3570
3571 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3572 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3573 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3574 * the flip is completed (no longer pending). Since this doesn't raise
3575 * an interrupt per se, we watch for the change at vblank.
3576 */
3577 if (I915_READ16(ISR) & flip_pending)
3578 goto check_page_flip;
3579
3580 intel_prepare_page_flip(dev, plane);
3581 intel_finish_page_flip(dev, pipe);
3582 return true;
3583
3584 check_page_flip:
3585 intel_check_page_flip(dev, pipe);
3586 return false;
3587 }
3588
3589 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3590 {
3591 struct drm_device *dev = arg;
3592 struct drm_i915_private *dev_priv = dev->dev_private;
3593 u16 iir, new_iir;
3594 u32 pipe_stats[2];
3595 int pipe;
3596 u16 flip_mask =
3597 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3598 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3599
3600 if (!intel_irqs_enabled(dev_priv))
3601 return IRQ_NONE;
3602
3603 iir = I915_READ16(IIR);
3604 if (iir == 0)
3605 return IRQ_NONE;
3606
3607 while (iir & ~flip_mask) {
3608 /* Can't rely on pipestat interrupt bit in iir as it might
3609 * have been cleared after the pipestat interrupt was received.
3610 * It doesn't set the bit in iir again, but it still produces
3611 * interrupts (for non-MSI).
3612 */
3613 spin_lock(&dev_priv->irq_lock);
3614 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3615 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3616
3617 for_each_pipe(dev_priv, pipe) {
3618 int reg = PIPESTAT(pipe);
3619 pipe_stats[pipe] = I915_READ(reg);
3620
3621 /*
3622 * Clear the PIPE*STAT regs before the IIR
3623 */
3624 if (pipe_stats[pipe] & 0x8000ffff)
3625 I915_WRITE(reg, pipe_stats[pipe]);
3626 }
3627 spin_unlock(&dev_priv->irq_lock);
3628
3629 I915_WRITE16(IIR, iir & ~flip_mask);
3630 new_iir = I915_READ16(IIR); /* Flush posted writes */
3631
3632 if (iir & I915_USER_INTERRUPT)
3633 notify_ring(&dev_priv->ring[RCS]);
3634
3635 for_each_pipe(dev_priv, pipe) {
3636 int plane = pipe;
3637 if (HAS_FBC(dev))
3638 plane = !plane;
3639
3640 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3641 i8xx_handle_vblank(dev, plane, pipe, iir))
3642 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3643
3644 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3645 i9xx_pipe_crc_irq_handler(dev, pipe);
3646
3647 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3648 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3649 pipe);
3650 }
3651
3652 iir = new_iir;
3653 }
3654
3655 return IRQ_HANDLED;
3656 }
3657
3658 static void i8xx_irq_uninstall(struct drm_device * dev)
3659 {
3660 struct drm_i915_private *dev_priv = dev->dev_private;
3661 int pipe;
3662
3663 for_each_pipe(dev_priv, pipe) {
3664 /* Clear enable bits; then clear status bits */
3665 I915_WRITE(PIPESTAT(pipe), 0);
3666 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3667 }
3668 I915_WRITE16(IMR, 0xffff);
3669 I915_WRITE16(IER, 0x0);
3670 I915_WRITE16(IIR, I915_READ16(IIR));
3671 }
3672
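/*
 * Quiesce gen3 interrupts before the handler is installed: disable and ack
 * hotplug (where present), clear the pipestat enables and mask/disable
 * everything in IMR/IER.
 */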
3673 static void i915_irq_preinstall(struct drm_device * dev)
3674 {
3675 struct drm_i915_private *dev_priv = dev->dev_private;
3676 int pipe;
3677
3678 if (I915_HAS_HOTPLUG(dev)) {
3679 I915_WRITE(PORT_HOTPLUG_EN, 0);
3680 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3681 }
3682
3683 I915_WRITE16(HWSTAM, 0xeffe);
3684 for_each_pipe(dev_priv, pipe)
3685 I915_WRITE(PIPESTAT(pipe), 0);
3686 I915_WRITE(IMR, 0xffffffff);
3687 I915_WRITE(IER, 0x0);
3688 POSTING_READ(IER);
3689 }
3690
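/*
 * Enable the gen3 interrupt sources: program EMR, unmask the always-wanted
 * display/flip/ASLE bits in IMR, enable them (plus user interrupts and,
 * where available, the display port hotplug source) in IER, and turn on
 * the CRC-done pipestat bits on both pipes.
 */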
3691 static int i915_irq_postinstall(struct drm_device *dev)
3692 {
3693 struct drm_i915_private *dev_priv = dev->dev_private;
3694 u32 enable_mask;
3695
3696 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3697
3698 /* Unmask the interrupts that we always want on. */
3699 dev_priv->irq_mask =
3700 ~(I915_ASLE_INTERRUPT |
3701 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3702 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3703 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3704 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3705
3706 enable_mask =
3707 I915_ASLE_INTERRUPT |
3708 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3709 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3710 I915_USER_INTERRUPT;
3711
3712 if (I915_HAS_HOTPLUG(dev)) {
3713 I915_WRITE(PORT_HOTPLUG_EN, 0);
3714 POSTING_READ(PORT_HOTPLUG_EN);
3715
3716 /* Enable in IER... */
3717 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3718 /* and unmask in IMR */
3719 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3720 }
3721
3722 I915_WRITE(IMR, dev_priv->irq_mask);
3723 I915_WRITE(IER, enable_mask);
3724 POSTING_READ(IER);
3725
3726 i915_enable_asle_pipestat(dev);
3727
3728 /* Interrupt setup is already guaranteed to be single-threaded, this is
3729 * just to make the assert_spin_locked check happy. */
3730 spin_lock_irq(&dev_priv->irq_lock);
3731 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3732 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3733 spin_unlock_irq(&dev_priv->irq_lock);
3734
3735 return 0;
3736 }
3737
3738 /*
3739 * Returns true when a page flip has completed.
3740 */
3741 static bool i915_handle_vblank(struct drm_device *dev,
3742 int plane, int pipe, u32 iir)
3743 {
3744 struct drm_i915_private *dev_priv = dev->dev_private;
3745 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3746
3747 if (!intel_pipe_handle_vblank(dev, pipe))
3748 return false;
3749
3750 if ((iir & flip_pending) == 0)
3751 goto check_page_flip;
3752
3753 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3754 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3755 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3756 * the flip is completed (no longer pending). Since this doesn't raise
3757 * an interrupt per se, we watch for the change at vblank.
3758 */
3759 if (I915_READ(ISR) & flip_pending)
3760 goto check_page_flip;
3761
3762 intel_prepare_page_flip(dev, plane);
3763 intel_finish_page_flip(dev, pipe);
3764 return true;
3765
3766 check_page_flip:
3767 intel_check_page_flip(dev, pipe);
3768 return false;
3769 }
3770
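/*
 * Top-level interrupt handler for gen3. Same structure as the gen2 handler
 * but with 32-bit IIR/IMR/IER, optional hotplug support and legacy
 * backlight (BLC/ASLE) event handling.
 */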
3771 static irqreturn_t i915_irq_handler(int irq, void *arg)
3772 {
3773 struct drm_device *dev = arg;
3774 struct drm_i915_private *dev_priv = dev->dev_private;
3775 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3776 u32 flip_mask =
3777 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3778 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3779 int pipe, ret = IRQ_NONE;
3780
3781 if (!intel_irqs_enabled(dev_priv))
3782 return IRQ_NONE;
3783
3784 iir = I915_READ(IIR);
3785 do {
3786 bool irq_received = (iir & ~flip_mask) != 0;
3787 bool blc_event = false;
3788
3789 /* Can't rely on pipestat interrupt bit in iir as it might
3790 * have been cleared after the pipestat interrupt was received.
3791 * It doesn't set the bit in iir again, but it still produces
3792 * interrupts (for non-MSI).
3793 */
3794 spin_lock(&dev_priv->irq_lock);
3795 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3796 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3797
3798 for_each_pipe(dev_priv, pipe) {
3799 int reg = PIPESTAT(pipe);
3800 pipe_stats[pipe] = I915_READ(reg);
3801
3802 /* Clear the PIPE*STAT regs before the IIR */
3803 if (pipe_stats[pipe] & 0x8000ffff) {
3804 I915_WRITE(reg, pipe_stats[pipe]);
3805 irq_received = true;
3806 }
3807 }
3808 spin_unlock(&dev_priv->irq_lock);
3809
3810 if (!irq_received)
3811 break;
3812
3813 /* Consume port. Then clear IIR or we'll miss events */
3814 if (I915_HAS_HOTPLUG(dev) &&
3815 iir & I915_DISPLAY_PORT_INTERRUPT)
3816 i9xx_hpd_irq_handler(dev);
3817
3818 I915_WRITE(IIR, iir & ~flip_mask);
3819 new_iir = I915_READ(IIR); /* Flush posted writes */
3820
3821 if (iir & I915_USER_INTERRUPT)
3822 notify_ring(&dev_priv->ring[RCS]);
3823
3824 for_each_pipe(dev_priv, pipe) {
3825 int plane = pipe;
3826 if (HAS_FBC(dev))
3827 plane = !plane;
3828
3829 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3830 i915_handle_vblank(dev, plane, pipe, iir))
3831 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3832
3833 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3834 blc_event = true;
3835
3836 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3837 i9xx_pipe_crc_irq_handler(dev, pipe);
3838
3839 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3840 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3841 pipe);
3842 }
3843
3844 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3845 intel_opregion_asle_intr(dev);
3846
3847 /* With MSI, interrupts are only generated when iir
3848 * transitions from zero to nonzero. If another bit got
3849 * set while we were handling the existing iir bits, then
3850 * we would never get another interrupt.
3851 *
3852 * This is fine on non-MSI as well, as if we hit this path
3853 * we avoid exiting the interrupt handler only to generate
3854 * another one.
3855 *
3856 * Note that for MSI this could cause a stray interrupt report
3857 * if an interrupt landed in the time between writing IIR and
3858 * the posting read. This should be rare enough to never
3859 * trigger the 99% of 100,000 interrupts test for disabling
3860 * stray interrupts.
3861 */
3862 ret = IRQ_HANDLED;
3863 iir = new_iir;
3864 } while (iir & ~flip_mask);
3865
3866 return ret;
3867 }
3868
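/*
 * Tear down gen3 interrupts: mirror of i915_irq_preinstall, plus an
 * explicit ack of any pipestat status bits that may still be pending.
 */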
3869 static void i915_irq_uninstall(struct drm_device * dev)
3870 {
3871 struct drm_i915_private *dev_priv = dev->dev_private;
3872 int pipe;
3873
3874 if (I915_HAS_HOTPLUG(dev)) {
3875 I915_WRITE(PORT_HOTPLUG_EN, 0);
3876 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3877 }
3878
3879 I915_WRITE16(HWSTAM, 0xffff);
3880 for_each_pipe(dev_priv, pipe) {
3881 /* Clear enable bits; then clear status bits */
3882 I915_WRITE(PIPESTAT(pipe), 0);
3883 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3884 }
3885 I915_WRITE(IMR, 0xffffffff);
3886 I915_WRITE(IER, 0x0);
3887
3888 I915_WRITE(IIR, I915_READ(IIR));
3889 }
3890
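/*
 * Quiesce gen4 interrupts before the handler is installed; unlike gen3,
 * hotplug is always present here, so it is cleared unconditionally.
 */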
3891 static void i965_irq_preinstall(struct drm_device * dev)
3892 {
3893 struct drm_i915_private *dev_priv = dev->dev_private;
3894 int pipe;
3895
3896 I915_WRITE(PORT_HOTPLUG_EN, 0);
3897 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3898
3899 I915_WRITE(HWSTAM, 0xeffe);
3900 for_each_pipe(dev_priv, pipe)
3901 I915_WRITE(PIPESTAT(pipe), 0);
3902 I915_WRITE(IMR, 0xffffffff);
3903 I915_WRITE(IER, 0x0);
3904 POSTING_READ(IER);
3905 }
3906
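/*
 * Enable the gen4 interrupt sources: unmask the always-wanted display,
 * hotplug and command parser error bits, enable user (and on G4X also BSD)
 * interrupts, turn on the GMBUS and CRC-done pipestat bits and program EMR
 * for the error conditions we want reported.
 */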
3907 static int i965_irq_postinstall(struct drm_device *dev)
3908 {
3909 struct drm_i915_private *dev_priv = dev->dev_private;
3910 u32 enable_mask;
3911 u32 error_mask;
3912
3913 /* Unmask the interrupts that we always want on. */
3914 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
3915 I915_DISPLAY_PORT_INTERRUPT |
3916 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3917 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3918 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3919 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3920 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3921
3922 enable_mask = ~dev_priv->irq_mask;
3923 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3924 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3925 enable_mask |= I915_USER_INTERRUPT;
3926
3927 if (IS_G4X(dev))
3928 enable_mask |= I915_BSD_USER_INTERRUPT;
3929
3930 /* Interrupt setup is already guaranteed to be single-threaded, this is
3931 * just to make the assert_spin_locked check happy. */
3932 spin_lock_irq(&dev_priv->irq_lock);
3933 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3934 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3935 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3936 spin_unlock_irq(&dev_priv->irq_lock);
3937
3938 /*
3939 * Enable some error detection, note the instruction error mask
3940 * bit is reserved, so we leave it masked.
3941 */
3942 if (IS_G4X(dev)) {
3943 error_mask = ~(GM45_ERROR_PAGE_TABLE |
3944 GM45_ERROR_MEM_PRIV |
3945 GM45_ERROR_CP_PRIV |
3946 I915_ERROR_MEMORY_REFRESH);
3947 } else {
3948 error_mask = ~(I915_ERROR_PAGE_TABLE |
3949 I915_ERROR_MEMORY_REFRESH);
3950 }
3951 I915_WRITE(EMR, error_mask);
3952
3953 I915_WRITE(IMR, dev_priv->irq_mask);
3954 I915_WRITE(IER, enable_mask);
3955 POSTING_READ(IER);
3956
3957 I915_WRITE(PORT_HOTPLUG_EN, 0);
3958 POSTING_READ(PORT_HOTPLUG_EN);
3959
3960 i915_enable_asle_pipestat(dev);
3961
3962 return 0;
3963 }
3964
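/*
 * Program PORT_HOTPLUG_EN for every encoder whose hotplug pin is currently
 * enabled. Must be called with dev_priv->irq_lock held (see the
 * assert_spin_locked below).
 */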
3965 static void i915_hpd_irq_setup(struct drm_device *dev)
3966 {
3967 struct drm_i915_private *dev_priv = dev->dev_private;
3968 struct intel_encoder *intel_encoder;
3969 u32 hotplug_en;
3970
3971 assert_spin_locked(&dev_priv->irq_lock);
3972
3973 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3974 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3975 /* Note HDMI and DP share hotplug bits */
3976 /* enable bits are the same for all generations */
3977 for_each_intel_encoder(dev, intel_encoder)
3978 if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3979 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
3980 /*
3981 * Programming the CRT detection parameters tends to generate a spurious
3982 * hotplug event about three seconds later, so just do it once.
3983 */
3984 if (IS_G4X(dev))
3985 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3986 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
3987 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3988
3989 /* Ignore TV since it's buggy */
3990 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3991 }
3992
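/*
 * Top-level interrupt handler for gen4: extends the gen3 flow with the BSD
 * ring user interrupt, GMBUS events and unconditional display port hotplug
 * handling; the plane/pipe swap applied on FBC-capable gen2/3 parts is not
 * needed, so the pipe number is passed straight through as the plane.
 */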
3993 static irqreturn_t i965_irq_handler(int irq, void *arg)
3994 {
3995 struct drm_device *dev = arg;
3996 struct drm_i915_private *dev_priv = dev->dev_private;
3997 u32 iir, new_iir;
3998 u32 pipe_stats[I915_MAX_PIPES];
3999 int ret = IRQ_NONE, pipe;
4000 u32 flip_mask =
4001 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4002 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4003
4004 if (!intel_irqs_enabled(dev_priv))
4005 return IRQ_NONE;
4006
4007 iir = I915_READ(IIR);
4008
4009 for (;;) {
4010 bool irq_received = (iir & ~flip_mask) != 0;
4011 bool blc_event = false;
4012
4013 /* Can't rely on pipestat interrupt bit in iir as it might
4014 * have been cleared after the pipestat interrupt was received.
4015 * It doesn't set the bit in iir again, but it still produces
4016 * interrupts (for non-MSI).
4017 */
4018 spin_lock(&dev_priv->irq_lock);
4019 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4020 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4021
4022 for_each_pipe(dev_priv, pipe) {
4023 int reg = PIPESTAT(pipe);
4024 pipe_stats[pipe] = I915_READ(reg);
4025
4026 /*
4027 * Clear the PIPE*STAT regs before the IIR
4028 */
4029 if (pipe_stats[pipe] & 0x8000ffff) {
4030 I915_WRITE(reg, pipe_stats[pipe]);
4031 irq_received = true;
4032 }
4033 }
4034 spin_unlock(&dev_priv->irq_lock);
4035
4036 if (!irq_received)
4037 break;
4038
4039 ret = IRQ_HANDLED;
4040
4041 /* Consume port. Then clear IIR or we'll miss events */
4042 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4043 i9xx_hpd_irq_handler(dev);
4044
4045 I915_WRITE(IIR, iir & ~flip_mask);
4046 new_iir = I915_READ(IIR); /* Flush posted writes */
4047
4048 if (iir & I915_USER_INTERRUPT)
4049 notify_ring(&dev_priv->ring[RCS]);
4050 if (iir & I915_BSD_USER_INTERRUPT)
4051 notify_ring(&dev_priv->ring[VCS]);
4052
4053 for_each_pipe(dev_priv, pipe) {
4054 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4055 i915_handle_vblank(dev, pipe, pipe, iir))
4056 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4057
4058 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4059 blc_event = true;
4060
4061 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4062 i9xx_pipe_crc_irq_handler(dev, pipe);
4063
4064 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4065 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4066 }
4067
4068 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4069 intel_opregion_asle_intr(dev);
4070
4071 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4072 gmbus_irq_handler(dev);
4073
4074 /* With MSI, interrupts are only generated when iir
4075 * transitions from zero to nonzero. If another bit got
4076 * set while we were handling the existing iir bits, then
4077 * we would never get another interrupt.
4078 *
4079 * This is fine on non-MSI as well, as if we hit this path
4080 * we avoid exiting the interrupt handler only to generate
4081 * another one.
4082 *
4083 * Note that for MSI this could cause a stray interrupt report
4084 * if an interrupt landed in the time between writing IIR and
4085 * the posting read. This should be rare enough to never
4086 * trigger the 99% of 100,000 interrupts test for disabling
4087 * stray interrupts.
4088 */
4089 iir = new_iir;
4090 }
4091
4092 return ret;
4093 }
4094
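/*
 * Tear down gen4 interrupts: disable and ack hotplug, mask and disable
 * everything and ack any leftover pipestat and IIR bits.
 */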
4095 static void i965_irq_uninstall(struct drm_device * dev)
4096 {
4097 struct drm_i915_private *dev_priv = dev->dev_private;
4098 int pipe;
4099
4100 if (!dev_priv)
4101 return;
4102
4103 I915_WRITE(PORT_HOTPLUG_EN, 0);
4104 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4105
4106 I915_WRITE(HWSTAM, 0xffffffff);
4107 for_each_pipe(dev_priv, pipe)
4108 I915_WRITE(PIPESTAT(pipe), 0);
4109 I915_WRITE(IMR, 0xffffffff);
4110 I915_WRITE(IER, 0x0);
4111
4112 for_each_pipe(dev_priv, pipe)
4113 I915_WRITE(PIPESTAT(pipe),
4114 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4115 I915_WRITE(IIR, I915_READ(IIR));
4116 }
4117
4118 /**
4119 * intel_irq_init - initializes irq support
4120 * @dev_priv: i915 device instance
4121 *
4122 * This function initializes all the irq support including work items, timers
4123 * and all the vtables. It does not set up the interrupt itself, though.
4124 */
4125 void intel_irq_init(struct drm_i915_private *dev_priv)
4126 {
4127 struct drm_device *dev = dev_priv->dev;
4128
4129 intel_hpd_init_work(dev_priv);
4130
4131 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4132 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4133
4134 /* Let's track the enabled rps events */
4135 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4136 /* WaGsvRC0ResidencyMethod:vlv */
4137 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4138 else
4139 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4140
4141 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4142 i915_hangcheck_elapsed);
4143
4144 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4145
4146 if (IS_GEN2(dev_priv)) {
4147 dev->max_vblank_count = 0;
4148 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4149 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4150 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4151 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4152 } else {
4153 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4154 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4155 }
4156
4157 /*
4158 * Opt out of the vblank disable timer on everything except gen2.
4159 * Gen2 doesn't have a hardware frame counter and so depends on
4160 * vblank interrupts to produce sane vblank sequence numbers.
4161 */
4162 if (!IS_GEN2(dev_priv))
4163 dev->vblank_disable_immediate = true;
4164
4165 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4166 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4167
4168 if (IS_CHERRYVIEW(dev_priv)) {
4169 dev->driver->irq_handler = cherryview_irq_handler;
4170 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4171 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4172 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4173 dev->driver->enable_vblank = valleyview_enable_vblank;
4174 dev->driver->disable_vblank = valleyview_disable_vblank;
4175 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4176 } else if (IS_VALLEYVIEW(dev_priv)) {
4177 dev->driver->irq_handler = valleyview_irq_handler;
4178 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4179 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4180 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4181 dev->driver->enable_vblank = valleyview_enable_vblank;
4182 dev->driver->disable_vblank = valleyview_disable_vblank;
4183 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4184 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4185 dev->driver->irq_handler = gen8_irq_handler;
4186 dev->driver->irq_preinstall = gen8_irq_reset;
4187 dev->driver->irq_postinstall = gen8_irq_postinstall;
4188 dev->driver->irq_uninstall = gen8_irq_uninstall;
4189 dev->driver->enable_vblank = gen8_enable_vblank;
4190 dev->driver->disable_vblank = gen8_disable_vblank;
4191 if (HAS_PCH_SPLIT(dev))
4192 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4193 else
4194 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4195 } else if (HAS_PCH_SPLIT(dev)) {
4196 dev->driver->irq_handler = ironlake_irq_handler;
4197 dev->driver->irq_preinstall = ironlake_irq_reset;
4198 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4199 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4200 dev->driver->enable_vblank = ironlake_enable_vblank;
4201 dev->driver->disable_vblank = ironlake_disable_vblank;
4202 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4203 } else {
4204 if (INTEL_INFO(dev_priv)->gen == 2) {
4205 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4206 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4207 dev->driver->irq_handler = i8xx_irq_handler;
4208 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4209 } else if (INTEL_INFO(dev_priv)->gen == 3) {
4210 dev->driver->irq_preinstall = i915_irq_preinstall;
4211 dev->driver->irq_postinstall = i915_irq_postinstall;
4212 dev->driver->irq_uninstall = i915_irq_uninstall;
4213 dev->driver->irq_handler = i915_irq_handler;
4214 } else {
4215 dev->driver->irq_preinstall = i965_irq_preinstall;
4216 dev->driver->irq_postinstall = i965_irq_postinstall;
4217 dev->driver->irq_uninstall = i965_irq_uninstall;
4218 dev->driver->irq_handler = i965_irq_handler;
4219 }
4220 if (I915_HAS_HOTPLUG(dev_priv))
4221 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4222 dev->driver->enable_vblank = i915_enable_vblank;
4223 dev->driver->disable_vblank = i915_disable_vblank;
4224 }
4225 }
4226
4227 /**
4228 * intel_irq_install - enables the hardware interrupt
4229 * @dev_priv: i915 device instance
4230 *
4231 * This function enables the hardware interrupt handling, but leaves hotplug
4232 * handling disabled. It is called after intel_irq_init().
4233 *
4234 * In the driver load and resume code we need working interrupts in a few places
4235 * but don't want to deal with the hassle of concurrent probe and hotplug
4236 * workers. Hence the split into this two-stage approach.
4237 */
4238 int intel_irq_install(struct drm_i915_private *dev_priv)
4239 {
4240 /*
4241 * We enable some interrupt sources in our postinstall hooks, so mark
4242 * interrupts as enabled _before_ actually enabling them to avoid
4243 * special cases in our ordering checks.
4244 */
4245 dev_priv->pm.irqs_enabled = true;
4246
4247 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4248 }
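
/*
 * Illustrative sketch only (not code from this file): the kerneldoc above
 * implies a two-stage bring-up roughly like
 *
 *	intel_irq_init(dev_priv);            vtables, work items, timers
 *	ret = intel_irq_install(dev_priv);   request the IRQ, run postinstall
 *	...
 *	intel_irq_uninstall(dev_priv);       on driver unload
 *
 * The real call sites are in the driver load/unload and resume code.
 */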
4249
4250 /**
4251 * intel_irq_uninstall - finalizes all irq handling
4252 * @dev_priv: i915 device instance
4253 *
4254 * This stops interrupt and hotplug handling and unregisters and frees all
4255 * resources acquired in the init functions.
4256 */
4257 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4258 {
4259 drm_irq_uninstall(dev_priv->dev);
4260 intel_hpd_cancel_work(dev_priv);
4261 dev_priv->pm.irqs_enabled = false;
4262 }
4263
4264 /**
4265 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4266 * @dev_priv: i915 device instance
4267 *
4268 * This function is used to disable interrupts at runtime, both in the runtime
4269 * pm and the system suspend/resume code.
4270 */
4271 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4272 {
4273 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4274 dev_priv->pm.irqs_enabled = false;
4275 synchronize_irq(dev_priv->dev->irq);
4276 }
4277
4278 /**
4279 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4280 * @dev_priv: i915 device instance
4281 *
4282 * This function is used to enable interrupts at runtime, both in the runtime
4283 * pm and the system suspend/resume code.
4284 */
4285 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4286 {
4287 dev_priv->pm.irqs_enabled = true;
4288 dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4289 dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4290 }