/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
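
/*
 * Why IIR is cleared twice in the reset macros above: IIR can latch a
 * second event while the first bit is still set, so a single ack can
 * leave a stale bit behind. Each reset therefore masks everything in
 * IMR, disables generation via IER, then acks IIR, posts the write,
 * and acks IIR once more to drain both slots.
 */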

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     reg, val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. So that read-modify-write cycles don't
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
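
/*
 * Example (hypothetical caller, for illustration only): enabling the
 * port B hotplug interrupt while leaving all other HPD enable bits
 * untouched would look like
 *
 *	i915_hotplug_interrupt_update(dev_priv,
 *				      PORTB_HOTPLUG_INT_EN,
 *				      PORTB_HOTPLUG_INT_EN);
 *
 * and passing bits == 0 with the same mask disables it again.
 */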

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
				   uint32_t interrupt_mask,
				   uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
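
/*
 * Note on the mask arithmetic above: a bit set in DEIMR *masks*
 * (disables) that interrupt, so dev_priv->irq_mask tracks disabled
 * sources. For each bit selected by interrupt_mask, the expression
 * (~enabled_irq_mask & interrupt_mask) sets the IMR bit when the
 * interrupt is being disabled and clears it when it is being
 * enabled. The same pattern recurs in the GT/PM/port variants below.
 */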

void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_display_irq(dev_priv, mask, mask);
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_display_irq(dev_priv, mask, 0);
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}
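
/*
 * On gen8+ the PM interrupt registers live in the GT block at index 2
 * (GEN8_GT_IIR(2) and friends); earlier gens use the dedicated
 * GEN6_PMIIR/PMIMR/PMIER registers. These three helpers keep the RPS
 * code below generation-agnostic.
 */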

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB can hard hang, and VLV and CHV may hard hang, on a
	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	synchronize_irq(dev->irq);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
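
/*
 * In other words: on VLV a status bit in the low 16 bits of PIPESTAT
 * is normally enabled by the bit 16 positions above it (enable_mask =
 * status_mask << 16). The FIFO underrun and sprite flip-done bits
 * masked out above don't follow that pattern, so the sprite enables
 * are patched back in explicitly.
 */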

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev: drm device
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       . \hs/ .      \hs/          \hs/          \hs/ .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
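
/*
 * Worked example with made-up numbers: htotal = 100, hsync_start = 90
 * and vblank_start = 480 give vbl_start = 480 * 100 - (100 - 90) =
 * 47990 pixels. The hardware frame counter only increments at the
 * start of active, so if the pixel counter has already reached 47990
 * we are inside vblank but the counter hasn't ticked yet; adding
 * (pixel >= vbl_start) cooks up a counter that effectively increments
 * at vblank start instead.
 */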

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up from vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;
	unsigned int mul = 100;

	if (old->cz_clock == 0)
		return false;

	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
		mul <<= 8;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->czclk_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

	return c0 >= time;
}
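
/*
 * The fixed-point comparison above is, roughly,
 *
 *	(render_c0 + media_c0 delta) / (cz timestamp delta)
 *		>= threshold / 100
 *
 * i.e. "was the GPU busy for more than threshold percent of the
 * evaluation interval". mul = 100 expresses the ratio in percent and
 * is scaled by 256 when the C0 counters run in their high range, with
 * VLV_CZ_CLOCK_TO_MILLI_SEC and czclk_freq reconciling the two clock
 * domains.
 */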

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		if (ring->irq_refcount)
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancellation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
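
/*
 * Example of the ramping above (SNB/IVB, where odd encodings are
 * allowed): four back-to-back up-threshold interrupts step the
 * frequency by +1, +2, +4, +8 as last_adj doubles, until new_delay is
 * clamped to max_freq_softlimit. On CHV the sequence starts at +2
 * since only even encodings are valid. Waiters, down-timeouts, client
 * boosts and unknown events all reset last_adj to 0.
 */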

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[RCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[BCS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS2]);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VECS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}
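
/*
 * Typical usage (see e.g. i9xx_hpd_irq_handler() below): the caller
 * zeroes pin_mask/long_mask once and then calls this helper for each
 * trigger register it has read. On platforms where the hotplug status
 * register carries both the trigger bits and the long/short pulse
 * bits, the same value is passed as hotplug_trigger and
 * dig_hotplug_reg.
 */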

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	return drm_handle_vblank(dev, pipe);
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
	u32 pin_mask = 0, long_mask = 0;

	if (!hotplug_status)
		return;

	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	/*
	 * Make sure hotplug status is cleared before we clear IIR, or else we
	 * may miss hotplug events.
	 */
	POSTING_READ(PORT_HOTPLUG_STAT);

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}
1823
1824 static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1825 const u32 hpd[HPD_NUM_PINS])
1826 {
1827 struct drm_i915_private *dev_priv = to_i915(dev);
1828 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1829
1830 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1831 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1832
1833 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1834 dig_hotplug_reg, hpd,
1835 pch_port_hotplug_long_detect);
1836
1837 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1838 }
1839
1840 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1841 {
1842 struct drm_i915_private *dev_priv = dev->dev_private;
1843 int pipe;
1844 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1845
1846 if (hotplug_trigger)
1847 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1848
1849 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1850 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1851 SDE_AUDIO_POWER_SHIFT);
1852 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1853 port_name(port));
1854 }
1855
1856 if (pch_iir & SDE_AUX_MASK)
1857 dp_aux_irq_handler(dev);
1858
1859 if (pch_iir & SDE_GMBUS)
1860 gmbus_irq_handler(dev);
1861
1862 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1863 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1864
1865 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1866 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1867
1868 if (pch_iir & SDE_POISON)
1869 DRM_ERROR("PCH poison interrupt\n");
1870
1871 if (pch_iir & SDE_FDI_MASK)
1872 for_each_pipe(dev_priv, pipe)
1873 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1874 pipe_name(pipe),
1875 I915_READ(FDI_RX_IIR(pipe)));
1876
1877 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1878 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1879
1880 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1881 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1882
1883 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1884 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1885
1886 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1887 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1888 }
1889
1890 static void ivb_err_int_handler(struct drm_device *dev)
1891 {
1892 struct drm_i915_private *dev_priv = dev->dev_private;
1893 u32 err_int = I915_READ(GEN7_ERR_INT);
1894 enum pipe pipe;
1895
1896 if (err_int & ERR_INT_POISON)
1897 DRM_ERROR("Poison interrupt\n");
1898
1899 for_each_pipe(dev_priv, pipe) {
1900 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1901 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1902
1903 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1904 if (IS_IVYBRIDGE(dev))
1905 ivb_pipe_crc_irq_handler(dev, pipe);
1906 else
1907 hsw_pipe_crc_irq_handler(dev, pipe);
1908 }
1909 }
1910
1911 I915_WRITE(GEN7_ERR_INT, err_int);
1912 }
1913
1914 static void cpt_serr_int_handler(struct drm_device *dev)
1915 {
1916 struct drm_i915_private *dev_priv = dev->dev_private;
1917 u32 serr_int = I915_READ(SERR_INT);
1918
1919 if (serr_int & SERR_INT_POISON)
1920 DRM_ERROR("PCH poison interrupt\n");
1921
1922 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1923 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1924
1925 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1926 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1927
1928 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1929 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1930
1931 I915_WRITE(SERR_INT, serr_int);
1932 }
1933
1934 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1935 {
1936 struct drm_i915_private *dev_priv = dev->dev_private;
1937 int pipe;
1938 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1939
1940 if (hotplug_trigger)
1941 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1942
1943 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1944 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1945 SDE_AUDIO_POWER_SHIFT_CPT);
1946 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1947 port_name(port));
1948 }
1949
1950 if (pch_iir & SDE_AUX_MASK_CPT)
1951 dp_aux_irq_handler(dev);
1952
1953 if (pch_iir & SDE_GMBUS_CPT)
1954 gmbus_irq_handler(dev);
1955
1956 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1957 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1958
1959 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1960 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1961
1962 if (pch_iir & SDE_FDI_MASK_CPT)
1963 for_each_pipe(dev_priv, pipe)
1964 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1965 pipe_name(pipe),
1966 I915_READ(FDI_RX_IIR(pipe)));
1967
1968 if (pch_iir & SDE_ERROR_CPT)
1969 cpt_serr_int_handler(dev);
1970 }
1971
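/*
 * SPT splits hotplug status across two registers: ports A-D report via
 * PCH_PORT_HOTPLUG while port E reports via PCH_PORT_HOTPLUG2, so both
 * triggers are folded into a single pin/long mask before the HPD handler
 * runs.
 */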
1972 static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
1973 {
1974 struct drm_i915_private *dev_priv = dev->dev_private;
1975 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1976 ~SDE_PORTE_HOTPLUG_SPT;
1977 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1978 u32 pin_mask = 0, long_mask = 0;
1979
1980 if (hotplug_trigger) {
1981 u32 dig_hotplug_reg;
1982
1983 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1984 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1985
1986 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1987 dig_hotplug_reg, hpd_spt,
1988 spt_port_hotplug_long_detect);
1989 }
1990
1991 if (hotplug2_trigger) {
1992 u32 dig_hotplug_reg;
1993
1994 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1995 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
1996
1997 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
1998 dig_hotplug_reg, hpd_spt,
1999 spt_port_hotplug2_long_detect);
2000 }
2001
2002 if (pin_mask)
2003 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2004
2005 if (pch_iir & SDE_GMBUS_CPT)
2006 gmbus_irq_handler(dev);
2007 }
2008
2009 static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2010 const u32 hpd[HPD_NUM_PINS])
2011 {
2012 struct drm_i915_private *dev_priv = to_i915(dev);
2013 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2014
2015 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2016 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2017
2018 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2019 dig_hotplug_reg, hpd,
2020 ilk_port_hotplug_long_detect);
2021
2022 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2023 }
2024
2025 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2026 {
2027 struct drm_i915_private *dev_priv = dev->dev_private;
2028 enum pipe pipe;
2029 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2030
2031 if (hotplug_trigger)
2032 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
2033
2034 if (de_iir & DE_AUX_CHANNEL_A)
2035 dp_aux_irq_handler(dev);
2036
2037 if (de_iir & DE_GSE)
2038 intel_opregion_asle_intr(dev);
2039
2040 if (de_iir & DE_POISON)
2041 DRM_ERROR("Poison interrupt\n");
2042
2043 for_each_pipe(dev_priv, pipe) {
2044 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2045 intel_pipe_handle_vblank(dev, pipe))
2046 intel_check_page_flip(dev, pipe);
2047
2048 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2049 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2050
2051 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2052 i9xx_pipe_crc_irq_handler(dev, pipe);
2053
2054 /* plane/pipes map 1:1 on ilk+ */
2055 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2056 intel_prepare_page_flip(dev, pipe);
2057 intel_finish_page_flip_plane(dev, pipe);
2058 }
2059 }
2060
2061 /* check event from PCH */
2062 if (de_iir & DE_PCH_EVENT) {
2063 u32 pch_iir = I915_READ(SDEIIR);
2064
2065 if (HAS_PCH_CPT(dev))
2066 cpt_irq_handler(dev, pch_iir);
2067 else
2068 ibx_irq_handler(dev, pch_iir);
2069
2070 		/* clear PCH hotplug event before clearing CPU irq */
2071 I915_WRITE(SDEIIR, pch_iir);
2072 }
2073
2074 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2075 ironlake_rps_change_irq_handler(dev);
2076 }
2077
2078 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2079 {
2080 struct drm_i915_private *dev_priv = dev->dev_private;
2081 enum pipe pipe;
2082 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2083
2084 if (hotplug_trigger)
2085 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
2086
2087 if (de_iir & DE_ERR_INT_IVB)
2088 ivb_err_int_handler(dev);
2089
2090 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2091 dp_aux_irq_handler(dev);
2092
2093 if (de_iir & DE_GSE_IVB)
2094 intel_opregion_asle_intr(dev);
2095
2096 for_each_pipe(dev_priv, pipe) {
2097 		if (de_iir & DE_PIPE_VBLANK_IVB(pipe) &&
2098 intel_pipe_handle_vblank(dev, pipe))
2099 intel_check_page_flip(dev, pipe);
2100
2101 /* plane/pipes map 1:1 on ilk+ */
2102 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2103 intel_prepare_page_flip(dev, pipe);
2104 intel_finish_page_flip_plane(dev, pipe);
2105 }
2106 }
2107
2108 /* check event from PCH */
2109 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2110 u32 pch_iir = I915_READ(SDEIIR);
2111
2112 cpt_irq_handler(dev, pch_iir);
2113
2114 		/* clear PCH hotplug event before clearing CPU irq */
2115 I915_WRITE(SDEIIR, pch_iir);
2116 }
2117 }
2118
2119 /*
2120 * To handle irqs with the minimum potential races with fresh interrupts, we:
2121 * 1 - Disable Master Interrupt Control.
2122 * 2 - Find the source(s) of the interrupt.
2123 * 3 - Clear the Interrupt Identity bits (IIR).
2124 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2125 * 5 - Re-enable Master Interrupt Control.
2126 */
2127 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2128 {
2129 struct drm_device *dev = arg;
2130 struct drm_i915_private *dev_priv = dev->dev_private;
2131 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2132 irqreturn_t ret = IRQ_NONE;
2133
2134 if (!intel_irqs_enabled(dev_priv))
2135 return IRQ_NONE;
2136
2137 /* We get interrupts on unclaimed registers, so check for this before we
2138 * do any I915_{READ,WRITE}. */
2139 intel_uncore_check_errors(dev);
2140
2141 /* disable master interrupt before clearing iir */
2142 de_ier = I915_READ(DEIER);
2143 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2144 POSTING_READ(DEIER);
2145
2146 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2147 	 * interrupts will be stored on its back queue, and then we'll be
2148 * able to process them after we restore SDEIER (as soon as we restore
2149 * it, we'll get an interrupt if SDEIIR still has something to process
2150 * due to its back queue). */
2151 if (!HAS_PCH_NOP(dev)) {
2152 sde_ier = I915_READ(SDEIER);
2153 I915_WRITE(SDEIER, 0);
2154 POSTING_READ(SDEIER);
2155 }
2156
2157 /* Find, clear, then process each source of interrupt */
2158
2159 gt_iir = I915_READ(GTIIR);
2160 if (gt_iir) {
2161 I915_WRITE(GTIIR, gt_iir);
2162 ret = IRQ_HANDLED;
2163 if (INTEL_INFO(dev)->gen >= 6)
2164 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2165 else
2166 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2167 }
2168
2169 de_iir = I915_READ(DEIIR);
2170 if (de_iir) {
2171 I915_WRITE(DEIIR, de_iir);
2172 ret = IRQ_HANDLED;
2173 if (INTEL_INFO(dev)->gen >= 7)
2174 ivb_display_irq_handler(dev, de_iir);
2175 else
2176 ilk_display_irq_handler(dev, de_iir);
2177 }
2178
2179 if (INTEL_INFO(dev)->gen >= 6) {
2180 u32 pm_iir = I915_READ(GEN6_PMIIR);
2181 if (pm_iir) {
2182 I915_WRITE(GEN6_PMIIR, pm_iir);
2183 ret = IRQ_HANDLED;
2184 gen6_rps_irq_handler(dev_priv, pm_iir);
2185 }
2186 }
2187
2188 I915_WRITE(DEIER, de_ier);
2189 POSTING_READ(DEIER);
2190 if (!HAS_PCH_NOP(dev)) {
2191 I915_WRITE(SDEIER, sde_ier);
2192 POSTING_READ(SDEIER);
2193 }
2194
2195 return ret;
2196 }
2197
2198 static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2199 const u32 hpd[HPD_NUM_PINS])
2200 {
2201 struct drm_i915_private *dev_priv = to_i915(dev);
2202 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2203
2204 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2205 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2206
2207 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2208 dig_hotplug_reg, hpd,
2209 bxt_port_hotplug_long_detect);
2210
2211 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2212 }
2213
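/*
 * Gen8+ top-level handler: master interrupt control is disabled up
 * front, every category flagged in GEN8_MASTER_IRQ (GT, DE misc, DE
 * port, per-pipe, PCH) is acked and processed, and the master control is
 * re-enabled last. The _FW accessors are used for the master control to
 * skip forcewake accounting in the hot irq path.
 */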
2214 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2215 {
2216 struct drm_device *dev = arg;
2217 struct drm_i915_private *dev_priv = dev->dev_private;
2218 u32 master_ctl;
2219 irqreturn_t ret = IRQ_NONE;
2220 uint32_t tmp = 0;
2221 enum pipe pipe;
2222 u32 aux_mask = GEN8_AUX_CHANNEL_A;
2223
2224 if (!intel_irqs_enabled(dev_priv))
2225 return IRQ_NONE;
2226
2227 if (INTEL_INFO(dev_priv)->gen >= 9)
2228 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2229 GEN9_AUX_CHANNEL_D;
2230
2231 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2232 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2233 if (!master_ctl)
2234 return IRQ_NONE;
2235
2236 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2237
2238 /* Find, clear, then process each source of interrupt */
2239
2240 ret = gen8_gt_irq_handler(dev_priv, master_ctl);
2241
2242 if (master_ctl & GEN8_DE_MISC_IRQ) {
2243 tmp = I915_READ(GEN8_DE_MISC_IIR);
2244 if (tmp) {
2245 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2246 ret = IRQ_HANDLED;
2247 if (tmp & GEN8_DE_MISC_GSE)
2248 intel_opregion_asle_intr(dev);
2249 else
2250 DRM_ERROR("Unexpected DE Misc interrupt\n");
2251 		} else
2252 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2254 }
2255
2256 if (master_ctl & GEN8_DE_PORT_IRQ) {
2257 tmp = I915_READ(GEN8_DE_PORT_IIR);
2258 if (tmp) {
2259 bool found = false;
2260 u32 hotplug_trigger = 0;
2261
2262 if (IS_BROXTON(dev_priv))
2263 hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
2264 else if (IS_BROADWELL(dev_priv))
2265 hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
2266
2267 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2268 ret = IRQ_HANDLED;
2269
2270 if (tmp & aux_mask) {
2271 dp_aux_irq_handler(dev);
2272 found = true;
2273 }
2274
2275 if (hotplug_trigger) {
2276 if (IS_BROXTON(dev))
2277 bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
2278 else
2279 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
2280 found = true;
2281 }
2282
2283 if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2284 gmbus_irq_handler(dev);
2285 found = true;
2286 }
2287
2288 if (!found)
2289 DRM_ERROR("Unexpected DE Port interrupt\n");
2290 		} else
2291 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2293 }
2294
2295 for_each_pipe(dev_priv, pipe) {
2296 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2297
2298 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2299 continue;
2300
2301 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2302 if (pipe_iir) {
2303 ret = IRQ_HANDLED;
2304 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2305
2306 if (pipe_iir & GEN8_PIPE_VBLANK &&
2307 intel_pipe_handle_vblank(dev, pipe))
2308 intel_check_page_flip(dev, pipe);
2309
2310 if (INTEL_INFO(dev_priv)->gen >= 9)
2311 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2312 else
2313 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2314
2315 if (flip_done) {
2316 intel_prepare_page_flip(dev, pipe);
2317 intel_finish_page_flip_plane(dev, pipe);
2318 }
2319
2320 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2321 hsw_pipe_crc_irq_handler(dev, pipe);
2322
2323 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2324 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2325 pipe);
2326
2328 if (INTEL_INFO(dev_priv)->gen >= 9)
2329 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2330 else
2331 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2332
2333 if (fault_errors)
2334 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2335 pipe_name(pipe),
2336 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2337 } else
2338 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2339 }
2340
2341 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2342 master_ctl & GEN8_DE_PCH_IRQ) {
2343 /*
2344 * FIXME(BDW): Assume for now that the new interrupt handling
2345 * scheme also closed the SDE interrupt handling race we've seen
2346 * on older pch-split platforms. But this needs testing.
2347 */
2348 u32 pch_iir = I915_READ(SDEIIR);
2349 if (pch_iir) {
2350 I915_WRITE(SDEIIR, pch_iir);
2351 ret = IRQ_HANDLED;
2352
2353 if (HAS_PCH_SPT(dev_priv))
2354 spt_irq_handler(dev, pch_iir);
2355 else
2356 cpt_irq_handler(dev, pch_iir);
2357 } else
2358 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2359
2360 }
2361
2362 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2363 POSTING_READ_FW(GEN8_MASTER_IRQ);
2364
2365 return ret;
2366 }
2367
2368 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2369 bool reset_completed)
2370 {
2371 struct intel_engine_cs *ring;
2372 int i;
2373
2374 /*
2375 * Notify all waiters for GPU completion events that reset state has
2376 * been changed, and that they need to restart their wait after
2377 * checking for potential errors (and bail out to drop locks if there is
2378 * a gpu reset pending so that i915_error_work_func can acquire them).
2379 */
2380
2381 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2382 for_each_ring(ring, dev_priv, i)
2383 wake_up_all(&ring->irq_queue);
2384
2385 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2386 wake_up_all(&dev_priv->pending_flip_queue);
2387
2388 /*
2389 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2390 * reset state is cleared.
2391 */
2392 if (reset_completed)
2393 wake_up_all(&dev_priv->gpu_error.reset_queue);
2394 }
2395
2396 /**
2397 * i915_reset_and_wakeup - do process context error handling work
2398 * @dev: drm device
2399 *
2400 * Fire an error uevent so userspace can see that a hang or error
2401 * was detected.
2402 */
2403 static void i915_reset_and_wakeup(struct drm_device *dev)
2404 {
2405 struct drm_i915_private *dev_priv = to_i915(dev);
2406 struct i915_gpu_error *error = &dev_priv->gpu_error;
2407 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2408 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2409 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2410 int ret;
2411
2412 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2413
2414 /*
2415 * Note that there's only one work item which does gpu resets, so we
2416 * need not worry about concurrent gpu resets potentially incrementing
2417 * error->reset_counter twice. We only need to take care of another
2418 * racing irq/hangcheck declaring the gpu dead for a second time. A
2419 * quick check for that is good enough: schedule_work ensures the
2420 * correct ordering between hang detection and this work item, and since
2421 * the reset in-progress bit is only ever set by code outside of this
2422 * work we don't need to worry about any other races.
2423 */
2424 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2425 DRM_DEBUG_DRIVER("resetting chip\n");
2426 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2427 reset_event);
2428
2429 /*
2430 * In most cases it's guaranteed that we get here with an RPM
2431 * reference held, for example because there is a pending GPU
2432 * request that won't finish until the reset is done. This
2433 * isn't the case at least when we get here by doing a
2434 		 * simulated reset via debugfs, so get an RPM reference.
2435 */
2436 intel_runtime_pm_get(dev_priv);
2437
2438 intel_prepare_reset(dev);
2439
2440 /*
2441 * All state reset _must_ be completed before we update the
2442 * reset counter, for otherwise waiters might miss the reset
2443 * pending state and not properly drop locks, resulting in
2444 * deadlocks with the reset work.
2445 */
2446 ret = i915_reset(dev);
2447
2448 intel_finish_reset(dev);
2449
2450 intel_runtime_pm_put(dev_priv);
2451
2452 if (ret == 0) {
2453 /*
2454 * After all the gem state is reset, increment the reset
2455 * counter and wake up everyone waiting for the reset to
2456 * complete.
2457 *
2458 * Since unlock operations are a one-sided barrier only,
2459 * we need to insert a barrier here to order any seqno
2460 			 * updates before the counter increment.
2462 */
2463 smp_mb__before_atomic();
2464 atomic_inc(&dev_priv->gpu_error.reset_counter);
2465
2466 kobject_uevent_env(&dev->primary->kdev->kobj,
2467 KOBJ_CHANGE, reset_done_event);
2468 } else {
2469 atomic_or(I915_WEDGED, &error->reset_counter);
2470 }
2471
2472 /*
2473 * Note: The wake_up also serves as a memory barrier so that
2474 		 * waiters see the updated value of the reset counter atomic_t.
2475 */
2476 i915_error_wake_up(dev_priv, true);
2477 }
2478 }
2479
2480 static void i915_report_and_clear_eir(struct drm_device *dev)
2481 {
2482 struct drm_i915_private *dev_priv = dev->dev_private;
2483 uint32_t instdone[I915_NUM_INSTDONE_REG];
2484 u32 eir = I915_READ(EIR);
2485 int pipe, i;
2486
2487 if (!eir)
2488 return;
2489
2490 pr_err("render error detected, EIR: 0x%08x\n", eir);
2491
2492 i915_get_extra_instdone(dev, instdone);
2493
2494 if (IS_G4X(dev)) {
2495 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2496 u32 ipeir = I915_READ(IPEIR_I965);
2497
2498 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2499 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2500 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2501 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2502 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2503 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2504 I915_WRITE(IPEIR_I965, ipeir);
2505 POSTING_READ(IPEIR_I965);
2506 }
2507 if (eir & GM45_ERROR_PAGE_TABLE) {
2508 u32 pgtbl_err = I915_READ(PGTBL_ER);
2509 pr_err("page table error\n");
2510 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2511 I915_WRITE(PGTBL_ER, pgtbl_err);
2512 POSTING_READ(PGTBL_ER);
2513 }
2514 }
2515
2516 if (!IS_GEN2(dev)) {
2517 if (eir & I915_ERROR_PAGE_TABLE) {
2518 u32 pgtbl_err = I915_READ(PGTBL_ER);
2519 pr_err("page table error\n");
2520 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2521 I915_WRITE(PGTBL_ER, pgtbl_err);
2522 POSTING_READ(PGTBL_ER);
2523 }
2524 }
2525
2526 if (eir & I915_ERROR_MEMORY_REFRESH) {
2527 pr_err("memory refresh error:\n");
2528 for_each_pipe(dev_priv, pipe)
2529 pr_err("pipe %c stat: 0x%08x\n",
2530 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2531 /* pipestat has already been acked */
2532 }
2533 if (eir & I915_ERROR_INSTRUCTION) {
2534 pr_err("instruction error\n");
2535 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2536 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2537 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2538 if (INTEL_INFO(dev)->gen < 4) {
2539 u32 ipeir = I915_READ(IPEIR);
2540
2541 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2542 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2543 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2544 I915_WRITE(IPEIR, ipeir);
2545 POSTING_READ(IPEIR);
2546 } else {
2547 u32 ipeir = I915_READ(IPEIR_I965);
2548
2549 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2550 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2551 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2552 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2553 I915_WRITE(IPEIR_I965, ipeir);
2554 POSTING_READ(IPEIR_I965);
2555 }
2556 }
2557
2558 I915_WRITE(EIR, eir);
2559 POSTING_READ(EIR);
2560 eir = I915_READ(EIR);
2561 if (eir) {
2562 /*
2563 * some errors might have become stuck,
2564 * mask them.
2565 */
2566 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2567 I915_WRITE(EMR, I915_READ(EMR) | eir);
2568 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2569 }
2570 }
2571
2572 /**
2573 * i915_handle_error - handle a gpu error
2574 * @dev: drm device
2575 *
2576 * Do some basic checking of register state at error time and
2577 * dump it to the syslog. Also call i915_capture_error_state() to make
2578 * sure we get a record and make it available in debugfs. Fire a uevent
2579 * so userspace knows something bad happened (should trigger collection
2580 * of a ring dump etc.).
2581 */
2582 void i915_handle_error(struct drm_device *dev, bool wedged,
2583 const char *fmt, ...)
2584 {
2585 struct drm_i915_private *dev_priv = dev->dev_private;
2586 va_list args;
2587 char error_msg[80];
2588
2589 va_start(args, fmt);
2590 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2591 va_end(args);
2592
2593 i915_capture_error_state(dev, wedged, error_msg);
2594 i915_report_and_clear_eir(dev);
2595
2596 if (wedged) {
2597 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2598 &dev_priv->gpu_error.reset_counter);
2599
2600 /*
2601 * Wakeup waiting processes so that the reset function
2602 * i915_reset_and_wakeup doesn't deadlock trying to grab
2603 * various locks. By bumping the reset counter first, the woken
2604 * processes will see a reset in progress and back off,
2605 * releasing their locks and then wait for the reset completion.
2606 * We must do this for _all_ gpu waiters that might hold locks
2607 * that the reset work needs to acquire.
2608 *
2609 * Note: The wake_up serves as the required memory barrier to
2610 * ensure that the waiters see the updated value of the reset
2611 * counter atomic_t.
2612 */
2613 i915_error_wake_up(dev_priv, false);
2614 }
2615
2616 i915_reset_and_wakeup(dev);
2617 }
2618
2619 /* Called from drm generic code, passed 'crtc' which
2620 * we use as a pipe index
2621 */
2622 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2623 {
2624 struct drm_i915_private *dev_priv = dev->dev_private;
2625 unsigned long irqflags;
2626
2627 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2628 if (INTEL_INFO(dev)->gen >= 4)
2629 i915_enable_pipestat(dev_priv, pipe,
2630 PIPE_START_VBLANK_INTERRUPT_STATUS);
2631 else
2632 i915_enable_pipestat(dev_priv, pipe,
2633 PIPE_VBLANK_INTERRUPT_STATUS);
2634 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2635
2636 return 0;
2637 }
2638
2639 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2640 {
2641 struct drm_i915_private *dev_priv = dev->dev_private;
2642 unsigned long irqflags;
2643 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2644 DE_PIPE_VBLANK(pipe);
2645
2646 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2647 ironlake_enable_display_irq(dev_priv, bit);
2648 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2649
2650 return 0;
2651 }
2652
2653 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2654 {
2655 struct drm_i915_private *dev_priv = dev->dev_private;
2656 unsigned long irqflags;
2657
2658 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2659 i915_enable_pipestat(dev_priv, pipe,
2660 PIPE_START_VBLANK_INTERRUPT_STATUS);
2661 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2662
2663 return 0;
2664 }
2665
2666 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2667 {
2668 struct drm_i915_private *dev_priv = dev->dev_private;
2669 unsigned long irqflags;
2670
2671 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2672 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2673 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2674 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2675 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2676 return 0;
2677 }
2678
2679 /* Called from drm generic code, passed 'crtc' which
2680 * we use as a pipe index
2681 */
2682 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2683 {
2684 struct drm_i915_private *dev_priv = dev->dev_private;
2685 unsigned long irqflags;
2686
2687 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2688 i915_disable_pipestat(dev_priv, pipe,
2689 PIPE_VBLANK_INTERRUPT_STATUS |
2690 PIPE_START_VBLANK_INTERRUPT_STATUS);
2691 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2692 }
2693
2694 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2695 {
2696 struct drm_i915_private *dev_priv = dev->dev_private;
2697 unsigned long irqflags;
2698 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2699 DE_PIPE_VBLANK(pipe);
2700
2701 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2702 ironlake_disable_display_irq(dev_priv, bit);
2703 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2704 }
2705
2706 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2707 {
2708 struct drm_i915_private *dev_priv = dev->dev_private;
2709 unsigned long irqflags;
2710
2711 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2712 i915_disable_pipestat(dev_priv, pipe,
2713 PIPE_START_VBLANK_INTERRUPT_STATUS);
2714 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2715 }
2716
2717 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2718 {
2719 struct drm_i915_private *dev_priv = dev->dev_private;
2720 unsigned long irqflags;
2721
2722 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2723 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2724 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2725 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2726 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2727 }
2728
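/* A ring counts as idle when it has no requests outstanding or the
 * hardware has already completed the last submitted seqno. */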
2729 static bool
2730 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2731 {
2732 return (list_empty(&ring->request_list) ||
2733 i915_seqno_passed(seqno, ring->last_submitted_seqno));
2734 }
2735
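/*
 * Check whether IPEHR holds a semaphore wait: gen8+ looks for the
 * MI_SEMAPHORE_WAIT opcode (0x1c) in the upper bits, older gens for
 * MI_SEMAPHORE_MBOX with the compare/register flags set.
 */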
2736 static bool
2737 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2738 {
2739 if (INTEL_INFO(dev)->gen >= 8) {
2740 return (ipehr >> 23) == 0x1c;
2741 } else {
2742 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2743 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2744 MI_SEMAPHORE_REGISTER);
2745 }
2746 }
2747
2748 static struct intel_engine_cs *
2749 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2750 {
2751 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2752 struct intel_engine_cs *signaller;
2753 int i;
2754
2755 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2756 for_each_ring(signaller, dev_priv, i) {
2757 if (ring == signaller)
2758 continue;
2759
2760 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2761 return signaller;
2762 }
2763 } else {
2764 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2765
2766 for_each_ring(signaller, dev_priv, i) {
2767 			if (ring == signaller)
2768 continue;
2769
2770 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2771 return signaller;
2772 }
2773 }
2774
2775 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2776 ring->id, ipehr, offset);
2777
2778 return NULL;
2779 }
2780
2781 static struct intel_engine_cs *
2782 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2783 {
2784 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2785 u32 cmd, ipehr, head;
2786 u64 offset = 0;
2787 int i, backwards;
2788
2789 /*
2790 * This function does not support execlist mode - any attempt to
2791 * proceed further into this function will result in a kernel panic
2792 * when dereferencing ring->buffer, which is not set up in execlist
2793 * mode.
2794 *
2795 * The correct way of doing it would be to derive the currently
2796 * executing ring buffer from the current context, which is derived
2797 * from the currently running request. Unfortunately, to get the
2798 * current request we would have to grab the struct_mutex before doing
2799 * anything else, which would be ill-advised since some other thread
2800 * might have grabbed it already and managed to hang itself, causing
2801 * the hang checker to deadlock.
2802 *
2803 * Therefore, this function does not support execlist mode in its
2804 * current form. Just return NULL and move on.
2805 */
2806 if (ring->buffer == NULL)
2807 return NULL;
2808
2809 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2810 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2811 return NULL;
2812
2813 /*
2814 * HEAD is likely pointing to the dword after the actual command,
2815 	 * so scan backwards until we find the MBOX. But limit it to just 4
2816 	 * or 5 dwords depending on the semaphore wait command size.
2817 * Note that we don't care about ACTHD here since that might
2818 	 * point at a batch, and semaphores are always emitted into the
2819 * ringbuffer itself.
2820 */
2821 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2822 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2823
2824 for (i = backwards; i; --i) {
2825 /*
2826 * Be paranoid and presume the hw has gone off into the wild -
2827 * our ring is smaller than what the hardware (and hence
2828 * HEAD_ADDR) allows. Also handles wrap-around.
2829 */
2830 head &= ring->buffer->size - 1;
2831
2832 /* This here seems to blow up */
2833 cmd = ioread32(ring->buffer->virtual_start + head);
2834 if (cmd == ipehr)
2835 break;
2836
2837 head -= 4;
2838 }
2839
2840 if (!i)
2841 return NULL;
2842
2843 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2844 if (INTEL_INFO(ring->dev)->gen >= 8) {
2845 offset = ioread32(ring->buffer->virtual_start + head + 12);
2846 offset <<= 32;
2847 		offset |= ioread32(ring->buffer->virtual_start + head + 8);
2848 }
2849 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2850 }
2851
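/*
 * Resolve the semaphore this ring is blocked on and report whether the
 * signaller has already passed the target seqno (1), may still make
 * progress (0), or cannot be resolved / is itself stuck (-1).
 */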
2852 static int semaphore_passed(struct intel_engine_cs *ring)
2853 {
2854 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2855 struct intel_engine_cs *signaller;
2856 u32 seqno;
2857
2858 ring->hangcheck.deadlock++;
2859
2860 signaller = semaphore_waits_for(ring, &seqno);
2861 if (signaller == NULL)
2862 return -1;
2863
2864 /* Prevent pathological recursion due to driver bugs */
2865 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2866 return -1;
2867
2868 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2869 return 1;
2870
2871 /* cursory check for an unkickable deadlock */
2872 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2873 semaphore_passed(signaller) < 0)
2874 return -1;
2875
2876 return 0;
2877 }
2878
2879 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2880 {
2881 struct intel_engine_cs *ring;
2882 int i;
2883
2884 for_each_ring(ring, dev_priv, i)
2885 ring->hangcheck.deadlock = 0;
2886 }
2887
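/*
 * Classify a ring whose seqno has stopped advancing: ACTHD still moving
 * means the batch is active (or spinning in a loop), otherwise try to
 * distinguish a kickable WAIT_FOR_EVENT or semaphore stall from a
 * genuine hang.
 */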
2888 static enum intel_ring_hangcheck_action
2889 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2890 {
2891 struct drm_device *dev = ring->dev;
2892 struct drm_i915_private *dev_priv = dev->dev_private;
2893 u32 tmp;
2894
2895 if (acthd != ring->hangcheck.acthd) {
2896 if (acthd > ring->hangcheck.max_acthd) {
2897 ring->hangcheck.max_acthd = acthd;
2898 return HANGCHECK_ACTIVE;
2899 }
2900
2901 return HANGCHECK_ACTIVE_LOOP;
2902 }
2903
2904 if (IS_GEN2(dev))
2905 return HANGCHECK_HUNG;
2906
2907 /* Is the chip hanging on a WAIT_FOR_EVENT?
2908 * If so we can simply poke the RB_WAIT bit
2909 * and break the hang. This should work on
2910 * all but the second generation chipsets.
2911 */
2912 tmp = I915_READ_CTL(ring);
2913 if (tmp & RING_WAIT) {
2914 i915_handle_error(dev, false,
2915 "Kicking stuck wait on %s",
2916 ring->name);
2917 I915_WRITE_CTL(ring, tmp);
2918 return HANGCHECK_KICK;
2919 }
2920
2921 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2922 switch (semaphore_passed(ring)) {
2923 default:
2924 return HANGCHECK_HUNG;
2925 case 1:
2926 i915_handle_error(dev, false,
2927 "Kicking stuck semaphore on %s",
2928 ring->name);
2929 I915_WRITE_CTL(ring, tmp);
2930 return HANGCHECK_KICK;
2931 case 0:
2932 return HANGCHECK_WAIT;
2933 }
2934 }
2935
2936 return HANGCHECK_HUNG;
2937 }
2938
2939 /*
2940 * This is called when the chip hasn't reported back with completed
2941 * batchbuffers in a long time. We keep track of per-ring seqno progress and
2942 * if there is no progress, the hangcheck score for that ring is increased.
2943 * Further, acthd is inspected to see if the ring is stuck. If it is, we
2944 * kick the ring. If we see no progress on three subsequent calls
2945 * we assume the chip is wedged and try to fix it by resetting the chip.
2946 */
2947 static void i915_hangcheck_elapsed(struct work_struct *work)
2948 {
2949 struct drm_i915_private *dev_priv =
2950 container_of(work, typeof(*dev_priv),
2951 gpu_error.hangcheck_work.work);
2952 struct drm_device *dev = dev_priv->dev;
2953 struct intel_engine_cs *ring;
2954 int i;
2955 int busy_count = 0, rings_hung = 0;
2956 bool stuck[I915_NUM_RINGS] = { 0 };
2957 #define BUSY 1
2958 #define KICK 5
2959 #define HUNG 20
2960
2961 if (!i915.enable_hangcheck)
2962 return;
2963
2964 for_each_ring(ring, dev_priv, i) {
2965 u64 acthd;
2966 u32 seqno;
2967 bool busy = true;
2968
2969 semaphore_clear_deadlocks(dev_priv);
2970
2971 seqno = ring->get_seqno(ring, false);
2972 acthd = intel_ring_get_active_head(ring);
2973
2974 if (ring->hangcheck.seqno == seqno) {
2975 if (ring_idle(ring, seqno)) {
2976 ring->hangcheck.action = HANGCHECK_IDLE;
2977
2978 if (waitqueue_active(&ring->irq_queue)) {
2979 /* Issue a wake-up to catch stuck h/w. */
2980 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2981 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2982 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2983 ring->name);
2984 else
2985 DRM_INFO("Fake missed irq on %s\n",
2986 ring->name);
2987 wake_up_all(&ring->irq_queue);
2988 }
2989 /* Safeguard against driver failure */
2990 ring->hangcheck.score += BUSY;
2991 } else
2992 busy = false;
2993 } else {
2994 /* We always increment the hangcheck score
2995 * if the ring is busy and still processing
2996 * the same request, so that no single request
2997 * can run indefinitely (such as a chain of
2998 * batches). The only time we do not increment
2999 				 * the hangcheck score on this ring is if this
3000 * ring is in a legitimate wait for another
3001 * ring. In that case the waiting ring is a
3002 * victim and we want to be sure we catch the
3003 * right culprit. Then every time we do kick
3004 * the ring, add a small increment to the
3005 * score so that we can catch a batch that is
3006 * being repeatedly kicked and so responsible
3007 * for stalling the machine.
3008 */
3009 ring->hangcheck.action = ring_stuck(ring,
3010 acthd);
3011
3012 switch (ring->hangcheck.action) {
3013 case HANGCHECK_IDLE:
3014 case HANGCHECK_WAIT:
3015 case HANGCHECK_ACTIVE:
3016 break;
3017 case HANGCHECK_ACTIVE_LOOP:
3018 ring->hangcheck.score += BUSY;
3019 break;
3020 case HANGCHECK_KICK:
3021 ring->hangcheck.score += KICK;
3022 break;
3023 case HANGCHECK_HUNG:
3024 ring->hangcheck.score += HUNG;
3025 stuck[i] = true;
3026 break;
3027 }
3028 }
3029 } else {
3030 ring->hangcheck.action = HANGCHECK_ACTIVE;
3031
3032 /* Gradually reduce the count so that we catch DoS
3033 * attempts across multiple batches.
3034 */
3035 if (ring->hangcheck.score > 0)
3036 ring->hangcheck.score--;
3037
3038 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3039 }
3040
3041 ring->hangcheck.seqno = seqno;
3042 ring->hangcheck.acthd = acthd;
3043 busy_count += busy;
3044 }
3045
3046 for_each_ring(ring, dev_priv, i) {
3047 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3048 DRM_INFO("%s on %s\n",
3049 stuck[i] ? "stuck" : "no progress",
3050 ring->name);
3051 rings_hung++;
3052 }
3053 }
3054
3055 if (rings_hung)
3056 return i915_handle_error(dev, true, "Ring hung");
3057
3058 if (busy_count)
3059 		/* Reset timer in case the chip hangs without another request
3060 * being added */
3061 i915_queue_hangcheck(dev);
3062 }
3063
3064 void i915_queue_hangcheck(struct drm_device *dev)
3065 {
3066 struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3067
3068 if (!i915.enable_hangcheck)
3069 return;
3070
3071 /* Don't continually defer the hangcheck so that it is always run at
3072 * least once after work has been scheduled on any ring. Otherwise,
3073 * we will ignore a hung ring if a second ring is kept busy.
3074 */
3075
3076 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3077 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3078 }
3079
3080 static void ibx_irq_reset(struct drm_device *dev)
3081 {
3082 struct drm_i915_private *dev_priv = dev->dev_private;
3083
3084 if (HAS_PCH_NOP(dev))
3085 return;
3086
3087 GEN5_IRQ_RESET(SDE);
3088
3089 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3090 I915_WRITE(SERR_INT, 0xffffffff);
3091 }
3092
3093 /*
3094 * SDEIER is also touched by the interrupt handler to work around missed PCH
3095 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3096 * instead we unconditionally enable all PCH interrupt sources here, but then
3097 * only unmask them as needed with SDEIMR.
3098 *
3099 * This function needs to be called before interrupts are enabled.
3100 */
3101 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3102 {
3103 struct drm_i915_private *dev_priv = dev->dev_private;
3104
3105 if (HAS_PCH_NOP(dev))
3106 return;
3107
3108 WARN_ON(I915_READ(SDEIER) != 0);
3109 I915_WRITE(SDEIER, 0xffffffff);
3110 POSTING_READ(SDEIER);
3111 }
3112
3113 static void gen5_gt_irq_reset(struct drm_device *dev)
3114 {
3115 struct drm_i915_private *dev_priv = dev->dev_private;
3116
3117 GEN5_IRQ_RESET(GT);
3118 if (INTEL_INFO(dev)->gen >= 6)
3119 GEN5_IRQ_RESET(GEN6_PM);
3120 }
3121
3122 /* drm_dma.h hooks
3123 */
3124 static void ironlake_irq_reset(struct drm_device *dev)
3125 {
3126 struct drm_i915_private *dev_priv = dev->dev_private;
3127
3128 I915_WRITE(HWSTAM, 0xffffffff);
3129
3130 GEN5_IRQ_RESET(DE);
3131 if (IS_GEN7(dev))
3132 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3133
3134 gen5_gt_irq_reset(dev);
3135
3136 ibx_irq_reset(dev);
3137 }
3138
3139 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3140 {
3141 enum pipe pipe;
3142
3143 i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
3144 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3145
3146 for_each_pipe(dev_priv, pipe)
3147 I915_WRITE(PIPESTAT(pipe), 0xffff);
3148
3149 GEN5_IRQ_RESET(VLV_);
3150 }
3151
3152 static void valleyview_irq_preinstall(struct drm_device *dev)
3153 {
3154 struct drm_i915_private *dev_priv = dev->dev_private;
3155
3156 /* VLV magic */
3157 I915_WRITE(VLV_IMR, 0);
3158 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3159 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3160 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3161
3162 gen5_gt_irq_reset(dev);
3163
3164 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3165
3166 vlv_display_irq_reset(dev_priv);
3167 }
3168
3169 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3170 {
3171 GEN8_IRQ_RESET_NDX(GT, 0);
3172 GEN8_IRQ_RESET_NDX(GT, 1);
3173 GEN8_IRQ_RESET_NDX(GT, 2);
3174 GEN8_IRQ_RESET_NDX(GT, 3);
3175 }
3176
3177 static void gen8_irq_reset(struct drm_device *dev)
3178 {
3179 struct drm_i915_private *dev_priv = dev->dev_private;
3180 int pipe;
3181
3182 I915_WRITE(GEN8_MASTER_IRQ, 0);
3183 POSTING_READ(GEN8_MASTER_IRQ);
3184
3185 gen8_gt_irq_reset(dev_priv);
3186
3187 for_each_pipe(dev_priv, pipe)
3188 if (intel_display_power_is_enabled(dev_priv,
3189 POWER_DOMAIN_PIPE(pipe)))
3190 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3191
3192 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3193 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3194 GEN5_IRQ_RESET(GEN8_PCU_);
3195
3196 if (HAS_PCH_SPLIT(dev))
3197 ibx_irq_reset(dev);
3198 }
3199
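/*
 * Called after a display power well comes back up: the DE pipe interrupt
 * registers lose their contents with the well, so reprogram every pipe in
 * pipe_mask from the saved de_irq_mask[], plus the always-wanted vblank
 * and FIFO underrun enables.
 */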
3200 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3201 unsigned int pipe_mask)
3202 {
3203 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3204
3205 spin_lock_irq(&dev_priv->irq_lock);
3206 if (pipe_mask & 1 << PIPE_A)
3207 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3208 dev_priv->de_irq_mask[PIPE_A],
3209 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3210 if (pipe_mask & 1 << PIPE_B)
3211 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3212 dev_priv->de_irq_mask[PIPE_B],
3213 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3214 if (pipe_mask & 1 << PIPE_C)
3215 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3216 dev_priv->de_irq_mask[PIPE_C],
3217 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3218 spin_unlock_irq(&dev_priv->irq_lock);
3219 }
3220
3221 static void cherryview_irq_preinstall(struct drm_device *dev)
3222 {
3223 struct drm_i915_private *dev_priv = dev->dev_private;
3224
3225 I915_WRITE(GEN8_MASTER_IRQ, 0);
3226 POSTING_READ(GEN8_MASTER_IRQ);
3227
3228 gen8_gt_irq_reset(dev_priv);
3229
3230 GEN5_IRQ_RESET(GEN8_PCU_);
3231
3232 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3233
3234 vlv_display_irq_reset(dev_priv);
3235 }
3236
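/* Collect the trigger bits for every encoder whose hotplug pin is
 * currently enabled, using the platform's pin-to-bit table. */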
3237 static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3238 const u32 hpd[HPD_NUM_PINS])
3239 {
3240 struct drm_i915_private *dev_priv = to_i915(dev);
3241 struct intel_encoder *encoder;
3242 u32 enabled_irqs = 0;
3243
3244 for_each_intel_encoder(dev, encoder)
3245 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3246 enabled_irqs |= hpd[encoder->hpd_pin];
3247
3248 return enabled_irqs;
3249 }
3250
3251 static void ibx_hpd_irq_setup(struct drm_device *dev)
3252 {
3253 struct drm_i915_private *dev_priv = dev->dev_private;
3254 u32 hotplug_irqs, hotplug, enabled_irqs;
3255
3256 if (HAS_PCH_IBX(dev)) {
3257 hotplug_irqs = SDE_HOTPLUG_MASK;
3258 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3259 } else {
3260 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3261 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3262 }
3263
3264 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3265
3266 /*
3267 * Enable digital hotplug on the PCH, and configure the DP short pulse
3268 * duration to 2ms (which is the minimum in the Display Port spec).
3269 * The pulse duration bits are reserved on LPT+.
3270 */
3271 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3272 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3273 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3274 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3275 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3276 /*
3277 * When CPU and PCH are on the same package, port A
3278 * HPD must be enabled in both north and south.
3279 */
3280 if (HAS_PCH_LPT_LP(dev))
3281 hotplug |= PORTA_HOTPLUG_ENABLE;
3282 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3283 }
3284
3285 static void spt_hpd_irq_setup(struct drm_device *dev)
3286 {
3287 struct drm_i915_private *dev_priv = dev->dev_private;
3288 u32 hotplug_irqs, hotplug, enabled_irqs;
3289
3290 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3291 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3292
3293 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3294
3295 /* Enable digital hotplug on the PCH */
3296 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3297 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3298 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3299 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3300
3301 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3302 hotplug |= PORTE_HOTPLUG_ENABLE;
3303 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3304 }
3305
3306 static void ilk_hpd_irq_setup(struct drm_device *dev)
3307 {
3308 struct drm_i915_private *dev_priv = dev->dev_private;
3309 u32 hotplug_irqs, hotplug, enabled_irqs;
3310
3311 if (INTEL_INFO(dev)->gen >= 8) {
3312 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3313 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3314
3315 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3316 } else if (INTEL_INFO(dev)->gen >= 7) {
3317 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3318 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3319
3320 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3321 } else {
3322 hotplug_irqs = DE_DP_A_HOTPLUG;
3323 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3324
3325 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3326 }
3327
3328 /*
3329 * Enable digital hotplug on the CPU, and configure the DP short pulse
3330 * duration to 2ms (which is the minimum in the Display Port spec)
3331 * The pulse duration bits are reserved on HSW+.
3332 */
3333 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3334 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3335 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3336 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3337
3338 ibx_hpd_irq_setup(dev);
3339 }
3340
3341 static void bxt_hpd_irq_setup(struct drm_device *dev)
3342 {
3343 struct drm_i915_private *dev_priv = dev->dev_private;
3344 u32 hotplug_irqs, hotplug, enabled_irqs;
3345
3346 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3347 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3348
3349 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3350
3351 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3352 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3353 PORTA_HOTPLUG_ENABLE;
3354 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3355 }
3356
3357 static void ibx_irq_postinstall(struct drm_device *dev)
3358 {
3359 struct drm_i915_private *dev_priv = dev->dev_private;
3360 u32 mask;
3361
3362 if (HAS_PCH_NOP(dev))
3363 return;
3364
3365 if (HAS_PCH_IBX(dev))
3366 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3367 else
3368 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3369
3370 gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3371 I915_WRITE(SDEIMR, ~mask);
3372 }
3373
3374 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3375 {
3376 struct drm_i915_private *dev_priv = dev->dev_private;
3377 u32 pm_irqs, gt_irqs;
3378
3379 pm_irqs = gt_irqs = 0;
3380
3381 dev_priv->gt_irq_mask = ~0;
3382 if (HAS_L3_DPF(dev)) {
3383 /* L3 parity interrupt is always unmasked. */
3384 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3385 gt_irqs |= GT_PARITY_ERROR(dev);
3386 }
3387
3388 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3389 if (IS_GEN5(dev)) {
3390 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3391 ILK_BSD_USER_INTERRUPT;
3392 } else {
3393 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3394 }
3395
3396 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3397
3398 if (INTEL_INFO(dev)->gen >= 6) {
3399 /*
3400 * RPS interrupts will get enabled/disabled on demand when RPS
3401 * itself is enabled/disabled.
3402 */
3403 if (HAS_VEBOX(dev))
3404 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3405
3406 dev_priv->pm_irq_mask = 0xffffffff;
3407 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3408 }
3409 }
3410
3411 static int ironlake_irq_postinstall(struct drm_device *dev)
3412 {
3413 struct drm_i915_private *dev_priv = dev->dev_private;
3414 u32 display_mask, extra_mask;
3415
3416 if (INTEL_INFO(dev)->gen >= 7) {
3417 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3418 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3419 DE_PLANEB_FLIP_DONE_IVB |
3420 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3421 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3422 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3423 DE_DP_A_HOTPLUG_IVB);
3424 } else {
3425 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3426 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3427 DE_AUX_CHANNEL_A |
3428 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3429 DE_POISON);
3430 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3431 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3432 DE_DP_A_HOTPLUG);
3433 }
3434
3435 dev_priv->irq_mask = ~display_mask;
3436
3437 I915_WRITE(HWSTAM, 0xeffe);
3438
3439 ibx_irq_pre_postinstall(dev);
3440
3441 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3442
3443 gen5_gt_irq_postinstall(dev);
3444
3445 ibx_irq_postinstall(dev);
3446
3447 if (IS_IRONLAKE_M(dev)) {
3448 /* Enable PCU event interrupts
3449 *
3450 * spinlocking not required here for correctness since interrupt
3451 * setup is guaranteed to run in single-threaded context. But we
3452 * need it to make the assert_spin_locked happy. */
3453 spin_lock_irq(&dev_priv->irq_lock);
3454 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3455 spin_unlock_irq(&dev_priv->irq_lock);
3456 }
3457
3458 return 0;
3459 }
3460
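/*
 * Open up the VLV display interrupts: stale PIPESTAT status is cleared
 * and VLV_IIR is cleared (twice) first, then the pipe event, GMBUS and
 * display port bits are unmasked in IMR/IER.
 */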
3461 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3462 {
3463 u32 pipestat_mask;
3464 u32 iir_mask;
3465 enum pipe pipe;
3466
3467 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3468 PIPE_FIFO_UNDERRUN_STATUS;
3469
3470 for_each_pipe(dev_priv, pipe)
3471 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3472 POSTING_READ(PIPESTAT(PIPE_A));
3473
3474 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3475 PIPE_CRC_DONE_INTERRUPT_STATUS;
3476
3477 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3478 for_each_pipe(dev_priv, pipe)
3479 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3480
3481 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3482 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3483 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3484 if (IS_CHERRYVIEW(dev_priv))
3485 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3486 dev_priv->irq_mask &= ~iir_mask;
3487
3488 I915_WRITE(VLV_IIR, iir_mask);
3489 I915_WRITE(VLV_IIR, iir_mask);
3490 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3491 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3492 POSTING_READ(VLV_IMR);
3493 }
3494
3495 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3496 {
3497 u32 pipestat_mask;
3498 u32 iir_mask;
3499 enum pipe pipe;
3500
3501 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3502 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3503 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3504 if (IS_CHERRYVIEW(dev_priv))
3505 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3506
3507 dev_priv->irq_mask |= iir_mask;
3508 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3509 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3510 I915_WRITE(VLV_IIR, iir_mask);
3511 I915_WRITE(VLV_IIR, iir_mask);
3512 POSTING_READ(VLV_IIR);
3513
3514 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3515 PIPE_CRC_DONE_INTERRUPT_STATUS;
3516
3517 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3518 for_each_pipe(dev_priv, pipe)
3519 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3520
3521 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3522 PIPE_FIFO_UNDERRUN_STATUS;
3523
3524 for_each_pipe(dev_priv, pipe)
3525 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3526 POSTING_READ(PIPESTAT(PIPE_A));
3527 }
3528
3529 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3530 {
3531 assert_spin_locked(&dev_priv->irq_lock);
3532
3533 if (dev_priv->display_irqs_enabled)
3534 return;
3535
3536 dev_priv->display_irqs_enabled = true;
3537
3538 if (intel_irqs_enabled(dev_priv))
3539 valleyview_display_irqs_install(dev_priv);
3540 }
3541
3542 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3543 {
3544 assert_spin_locked(&dev_priv->irq_lock);
3545
3546 if (!dev_priv->display_irqs_enabled)
3547 return;
3548
3549 dev_priv->display_irqs_enabled = false;
3550
3551 if (intel_irqs_enabled(dev_priv))
3552 valleyview_display_irqs_uninstall(dev_priv);
3553 }
3554
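/*
 * Shared VLV/CHV display postinstall: start fully masked with hotplug
 * and IIR state cleared, then install the display interrupts only if
 * they have been requested via valleyview_enable_display_irqs().
 */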
3555 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3556 {
3557 dev_priv->irq_mask = ~0;
3558
3559 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3560 POSTING_READ(PORT_HOTPLUG_EN);
3561
3562 I915_WRITE(VLV_IIR, 0xffffffff);
3563 I915_WRITE(VLV_IIR, 0xffffffff);
3564 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3565 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3566 POSTING_READ(VLV_IMR);
3567
3568 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3569 * just to make the assert_spin_locked check happy. */
3570 spin_lock_irq(&dev_priv->irq_lock);
3571 if (dev_priv->display_irqs_enabled)
3572 valleyview_display_irqs_install(dev_priv);
3573 spin_unlock_irq(&dev_priv->irq_lock);
3574 }
3575
3576 static int valleyview_irq_postinstall(struct drm_device *dev)
3577 {
3578 struct drm_i915_private *dev_priv = dev->dev_private;
3579
3580 vlv_display_irq_postinstall(dev_priv);
3581
3582 gen5_gt_irq_postinstall(dev);
3583
3584 /* ack & enable invalid PTE error interrupts */
3585 #if 0 /* FIXME: add support to irq handler for checking these bits */
3586 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3587 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3588 #endif
3589
3590 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3591
3592 return 0;
3593 }
3594
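/*
 * Per-engine GT interrupt setup for gen8+: each 32-bit GT IIR bank packs
 * two engines' bits at the GEN8_*_IRQ_SHIFT offsets, so gt_interrupts[]
 * below is indexed by bank (0-3). Bank 2 (PM) starts fully masked and is
 * opened on demand when RPS is enabled.
 */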
3595 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3596 {
3597 /* These are interrupts we'll toggle with the ring mask register */
3598 uint32_t gt_interrupts[] = {
3599 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3600 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3601 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3602 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3603 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3604 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3605 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3606 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3607 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3608 0,
3609 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3610 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3611 };
3612
3613 dev_priv->pm_irq_mask = 0xffffffff;
3614 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3615 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3616 /*
3617 * RPS interrupts will get enabled/disabled on demand when RPS itself
3618 * is enabled/disabled.
3619 */
3620 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3621 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3622 }
3623
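/*
 * Display engine interrupt setup for gen8+: the pipe and port masks are
 * built per generation (gen9 adds AUX channels B-D and uses a different
 * flip-done bit), and only pipes whose power domains are currently up are
 * programmed here - the rest are handled later by
 * gen8_irq_power_well_post_enable().
 */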
3624 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3625 {
3626 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3627 uint32_t de_pipe_enables;
3628 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3629 u32 de_port_enables;
3630 enum pipe pipe;
3631
3632 if (INTEL_INFO(dev_priv)->gen >= 9) {
3633 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3634 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3635 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3636 GEN9_AUX_CHANNEL_D;
3637 if (IS_BROXTON(dev_priv))
3638 de_port_masked |= BXT_DE_PORT_GMBUS;
3639 } else {
3640 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3641 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3642 }
3643
3644 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3645 GEN8_PIPE_FIFO_UNDERRUN;
3646
3647 de_port_enables = de_port_masked;
3648 if (IS_BROXTON(dev_priv))
3649 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3650 else if (IS_BROADWELL(dev_priv))
3651 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3652
3653 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3654 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3655 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3656
3657 for_each_pipe(dev_priv, pipe)
3658 if (intel_display_power_is_enabled(dev_priv,
3659 POWER_DOMAIN_PIPE(pipe)))
3660 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3661 dev_priv->de_irq_mask[pipe],
3662 de_pipe_enables);
3663
3664 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3665 }
3666
3667 static int gen8_irq_postinstall(struct drm_device *dev)
3668 {
3669 struct drm_i915_private *dev_priv = dev->dev_private;
3670
3671 if (HAS_PCH_SPLIT(dev))
3672 ibx_irq_pre_postinstall(dev);
3673
3674 gen8_gt_irq_postinstall(dev_priv);
3675 gen8_de_irq_postinstall(dev_priv);
3676
3677 if (HAS_PCH_SPLIT(dev))
3678 ibx_irq_postinstall(dev);
3679
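/* Write the top-level master enable only after the subordinate
 * IMR/IER registers above are programmed, so no interrupt can be
 * delivered before the per-domain masks are in place. */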
3680 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3681 POSTING_READ(GEN8_MASTER_IRQ);
3682
3683 return 0;
3684 }
3685
3686 static int cherryview_irq_postinstall(struct drm_device *dev)
3687 {
3688 struct drm_i915_private *dev_priv = dev->dev_private;
3689
3690 vlv_display_irq_postinstall(dev_priv);
3691
3692 gen8_gt_irq_postinstall(dev_priv);
3693
3694 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3695 POSTING_READ(GEN8_MASTER_IRQ);
3696
3697 return 0;
3698 }
3699
3700 static void gen8_irq_uninstall(struct drm_device *dev)
3701 {
3702 struct drm_i915_private *dev_priv = dev->dev_private;
3703
3704 if (!dev_priv)
3705 return;
3706
3707 gen8_irq_reset(dev);
3708 }
3709
3710 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3711 {
3712 /* Interrupt setup is already guaranteed to be single-threaded; this is
3713 * just to make the assert_spin_locked check happy. */
3714 spin_lock_irq(&dev_priv->irq_lock);
3715 if (dev_priv->display_irqs_enabled)
3716 valleyview_display_irqs_uninstall(dev_priv);
3717 spin_unlock_irq(&dev_priv->irq_lock);
3718
3719 vlv_display_irq_reset(dev_priv);
3720
3721 dev_priv->irq_mask = ~0;
3722 }
3723
3724 static void valleyview_irq_uninstall(struct drm_device *dev)
3725 {
3726 struct drm_i915_private *dev_priv = dev->dev_private;
3727
3728 if (!dev_priv)
3729 return;
3730
3731 I915_WRITE(VLV_MASTER_IER, 0);
3732
3733 gen5_gt_irq_reset(dev);
3734
3735 I915_WRITE(HWSTAM, 0xffffffff);
3736
3737 vlv_display_irq_uninstall(dev_priv);
3738 }
3739
3740 static void cherryview_irq_uninstall(struct drm_device *dev)
3741 {
3742 struct drm_i915_private *dev_priv = dev->dev_private;
3743
3744 if (!dev_priv)
3745 return;
3746
3747 I915_WRITE(GEN8_MASTER_IRQ, 0);
3748 POSTING_READ(GEN8_MASTER_IRQ);
3749
3750 gen8_gt_irq_reset(dev_priv);
3751
3752 GEN5_IRQ_RESET(GEN8_PCU_);
3753
3754 vlv_display_irq_uninstall(dev_priv);
3755 }
3756
3757 static void ironlake_irq_uninstall(struct drm_device *dev)
3758 {
3759 struct drm_i915_private *dev_priv = dev->dev_private;
3760
3761 if (!dev_priv)
3762 return;
3763
3764 ironlake_irq_reset(dev);
3765 }
3766
3767 static void i8xx_irq_preinstall(struct drm_device *dev)
3768 {
3769 struct drm_i915_private *dev_priv = dev->dev_private;
3770 int pipe;
3771
3772 for_each_pipe(dev_priv, pipe)
3773 I915_WRITE(PIPESTAT(pipe), 0);
3774 I915_WRITE16(IMR, 0xffff);
3775 I915_WRITE16(IER, 0x0);
3776 POSTING_READ16(IER);
3777 }
3778
3779 static int i8xx_irq_postinstall(struct drm_device *dev)
3780 {
3781 struct drm_i915_private *dev_priv = dev->dev_private;
3782
3783 I915_WRITE16(EMR,
3784 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3785
3786 /* Unmask the interrupts that we always want on. */
3787 dev_priv->irq_mask =
3788 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3789 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3790 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3791 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3792 I915_WRITE16(IMR, dev_priv->irq_mask);
3793
3794 I915_WRITE16(IER,
3795 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3796 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3797 I915_USER_INTERRUPT);
3798 POSTING_READ16(IER);
3799
3800 /* Interrupt setup is already guaranteed to be single-threaded; this is
3801 * just to make the assert_spin_locked check happy. */
3802 spin_lock_irq(&dev_priv->irq_lock);
3803 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3804 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3805 spin_unlock_irq(&dev_priv->irq_lock);
3806
3807 return 0;
3808 }
3809
3810 /*
3811 * Returns true when a page flip has completed.
3812 */
3813 static bool i8xx_handle_vblank(struct drm_device *dev,
3814 int plane, int pipe, u32 iir)
3815 {
3816 struct drm_i915_private *dev_priv = dev->dev_private;
3817 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3818
3819 if (!intel_pipe_handle_vblank(dev, pipe))
3820 return false;
3821
3822 if ((iir & flip_pending) == 0)
3823 goto check_page_flip;
3824
3825 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3826 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3827 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3828 * the flip has completed (no longer pending). Since this doesn't raise
3829 * an interrupt per se, we watch for the change at vblank.
3830 */
3831 if (I915_READ16(ISR) & flip_pending)
3832 goto check_page_flip;
3833
3834 intel_prepare_page_flip(dev, plane);
3835 intel_finish_page_flip(dev, pipe);
3836 return true;
3837
3838 check_page_flip:
3839 intel_check_page_flip(dev, pipe);
3840 return false;
3841 }
3842
3843 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3844 {
3845 struct drm_device *dev = arg;
3846 struct drm_i915_private *dev_priv = dev->dev_private;
3847 u16 iir, new_iir;
3848 u32 pipe_stats[2];
3849 int pipe;
3850 u16 flip_mask =
3851 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3852 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3853
3854 if (!intel_irqs_enabled(dev_priv))
3855 return IRQ_NONE;
3856
3857 iir = I915_READ16(IIR);
3858 if (iir == 0)
3859 return IRQ_NONE;
3860
3861 while (iir & ~flip_mask) {
3862 /* Can't rely on pipestat interrupt bit in iir as it might
3863 * have been cleared after the pipestat interrupt was received.
3864 * It doesn't set the bit in iir again, but it still produces
3865 * interrupts (for non-MSI).
3866 */
3867 spin_lock(&dev_priv->irq_lock);
3868 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3869 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3870
3871 for_each_pipe(dev_priv, pipe) {
3872 int reg = PIPESTAT(pipe);
3873 pipe_stats[pipe] = I915_READ(reg);
3874
3875 /*
3876 * Clear the PIPE*STAT regs before the IIR
3877 */
3878 if (pipe_stats[pipe] & 0x8000ffff)
3879 I915_WRITE(reg, pipe_stats[pipe]);
3880 }
3881 spin_unlock(&dev_priv->irq_lock);
3882
3883 I915_WRITE16(IIR, iir & ~flip_mask);
3884 new_iir = I915_READ16(IIR); /* Flush posted writes */
3885
3886 if (iir & I915_USER_INTERRUPT)
3887 notify_ring(&dev_priv->ring[RCS]);
3888
3889 for_each_pipe(dev_priv, pipe) {
3890 int plane = pipe;
3891 if (HAS_FBC(dev))
3892 plane = !plane;
3893
3894 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3895 i8xx_handle_vblank(dev, plane, pipe, iir))
3896 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3897
3898 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3899 i9xx_pipe_crc_irq_handler(dev, pipe);
3900
3901 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3902 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3903 pipe);
3904 }
3905
3906 iir = new_iir;
3907 }
3908
3909 return IRQ_HANDLED;
3910 }
3911
3912 static void i8xx_irq_uninstall(struct drm_device *dev)
3913 {
3914 struct drm_i915_private *dev_priv = dev->dev_private;
3915 int pipe;
3916
3917 for_each_pipe(dev_priv, pipe) {
3918 /* Clear enable bits; then clear status bits */
3919 I915_WRITE(PIPESTAT(pipe), 0);
3920 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3921 }
3922 I915_WRITE16(IMR, 0xffff);
3923 I915_WRITE16(IER, 0x0);
3924 I915_WRITE16(IIR, I915_READ16(IIR));
3925 }
3926
3927 static void i915_irq_preinstall(struct drm_device *dev)
3928 {
3929 struct drm_i915_private *dev_priv = dev->dev_private;
3930 int pipe;
3931
3932 if (I915_HAS_HOTPLUG(dev)) {
3933 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3934 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3935 }
3936
3937 I915_WRITE16(HWSTAM, 0xeffe);
3938 for_each_pipe(dev_priv, pipe)
3939 I915_WRITE(PIPESTAT(pipe), 0);
3940 I915_WRITE(IMR, 0xffffffff);
3941 I915_WRITE(IER, 0x0);
3942 POSTING_READ(IER);
3943 }
3944
3945 static int i915_irq_postinstall(struct drm_device *dev)
3946 {
3947 struct drm_i915_private *dev_priv = dev->dev_private;
3948 u32 enable_mask;
3949
3950 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3951
3952 /* Unmask the interrupts that we always want on. */
3953 dev_priv->irq_mask =
3954 ~(I915_ASLE_INTERRUPT |
3955 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3956 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3957 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3958 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3959
3960 enable_mask =
3961 I915_ASLE_INTERRUPT |
3962 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3963 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3964 I915_USER_INTERRUPT;
3965
3966 if (I915_HAS_HOTPLUG(dev)) {
3967 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3968 POSTING_READ(PORT_HOTPLUG_EN);
3969
3970 /* Enable in IER... */
3971 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3972 /* and unmask in IMR */
3973 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3974 }
3975
3976 I915_WRITE(IMR, dev_priv->irq_mask);
3977 I915_WRITE(IER, enable_mask);
3978 POSTING_READ(IER);
3979
3980 i915_enable_asle_pipestat(dev);
3981
3982 /* Interrupt setup is already guaranteed to be single-threaded; this is
3983 * just to make the assert_spin_locked check happy. */
3984 spin_lock_irq(&dev_priv->irq_lock);
3985 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3986 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3987 spin_unlock_irq(&dev_priv->irq_lock);
3988
3989 return 0;
3990 }
3991
3992 /*
3993 * Returns true when a page flip has completed.
3994 */
3995 static bool i915_handle_vblank(struct drm_device *dev,
3996 int plane, int pipe, u32 iir)
3997 {
3998 struct drm_i915_private *dev_priv = dev->dev_private;
3999 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4000
4001 if (!intel_pipe_handle_vblank(dev, pipe))
4002 return false;
4003
4004 if ((iir & flip_pending) == 0)
4005 goto check_page_flip;
4006
4007 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4008 * to '0' on the following vblank, i.e. IIR has the PendingFlip
4009 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4010 * the flip has completed (no longer pending). Since this doesn't raise
4011 * an interrupt per se, we watch for the change at vblank.
4012 */
4013 if (I915_READ(ISR) & flip_pending)
4014 goto check_page_flip;
4015
4016 intel_prepare_page_flip(dev, plane);
4017 intel_finish_page_flip(dev, pipe);
4018 return true;
4019
4020 check_page_flip:
4021 intel_check_page_flip(dev, pipe);
4022 return false;
4023 }
4024
4025 static irqreturn_t i915_irq_handler(int irq, void *arg)
4026 {
4027 struct drm_device *dev = arg;
4028 struct drm_i915_private *dev_priv = dev->dev_private;
4029 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4030 u32 flip_mask =
4031 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4032 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4033 int pipe, ret = IRQ_NONE;
4034
4035 if (!intel_irqs_enabled(dev_priv))
4036 return IRQ_NONE;
4037
4038 iir = I915_READ(IIR);
4039 do {
4040 bool irq_received = (iir & ~flip_mask) != 0;
4041 bool blc_event = false;
4042
4043 /* Can't rely on pipestat interrupt bit in iir as it might
4044 * have been cleared after the pipestat interrupt was received.
4045 * It doesn't set the bit in iir again, but it still produces
4046 * interrupts (for non-MSI).
4047 */
4048 spin_lock(&dev_priv->irq_lock);
4049 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4050 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4051
4052 for_each_pipe(dev_priv, pipe) {
4053 int reg = PIPESTAT(pipe);
4054 pipe_stats[pipe] = I915_READ(reg);
4055
4056 /* Clear the PIPE*STAT regs before the IIR */
4057 if (pipe_stats[pipe] & 0x8000ffff) {
4058 I915_WRITE(reg, pipe_stats[pipe]);
4059 irq_received = true;
4060 }
4061 }
4062 spin_unlock(&dev_priv->irq_lock);
4063
4064 if (!irq_received)
4065 break;
4066
4067 /* Consume port. Then clear IIR or we'll miss events */
4068 if (I915_HAS_HOTPLUG(dev) &&
4069 iir & I915_DISPLAY_PORT_INTERRUPT)
4070 i9xx_hpd_irq_handler(dev);
4071
4072 I915_WRITE(IIR, iir & ~flip_mask);
4073 new_iir = I915_READ(IIR); /* Flush posted writes */
4074
4075 if (iir & I915_USER_INTERRUPT)
4076 notify_ring(&dev_priv->ring[RCS]);
4077
4078 for_each_pipe(dev_priv, pipe) {
4079 int plane = pipe;
4080 if (HAS_FBC(dev))
4081 plane = !plane;
4082
4083 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4084 i915_handle_vblank(dev, plane, pipe, iir))
4085 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4086
4087 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4088 blc_event = true;
4089
4090 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4091 i9xx_pipe_crc_irq_handler(dev, pipe);
4092
4093 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4094 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4095 pipe);
4096 }
4097
4098 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4099 intel_opregion_asle_intr(dev);
4100
4101 /* With MSI, interrupts are only generated when iir
4102 * transitions from zero to nonzero. If another bit got
4103 * set while we were handling the existing iir bits, then
4104 * we would never get another interrupt.
4105 *
4106 * This is fine on non-MSI as well, as if we hit this path
4107 * we avoid exiting the interrupt handler only to generate
4108 * another one.
4109 *
4110 * Note that for MSI this could cause a stray interrupt report
4111 * if an interrupt landed in the time between writing IIR and
4112 * the posting read. This should be rare enough to never
4113 * trigger the 99% of 100,000 interrupts test for disabling
4114 * stray interrupts.
4115 */
4116 ret = IRQ_HANDLED;
4117 iir = new_iir;
4118 } while (iir & ~flip_mask);
4119
4120 return ret;
4121 }
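/*
 * The loop shape above is shared by the legacy handlers in this file:
 * read IIR, latch and clear the PIPESTAT registers under irq_lock, ack
 * IIR (flushed with a posting read), service the latched events, then
 * re-read IIR and repeat until it is clear. See the MSI comment in the
 * loop body for why IIR is re-read rather than returning after one
 * pass.
 */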
4122
4123 static void i915_irq_uninstall(struct drm_device *dev)
4124 {
4125 struct drm_i915_private *dev_priv = dev->dev_private;
4126 int pipe;
4127
4128 if (I915_HAS_HOTPLUG(dev)) {
4129 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4130 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4131 }
4132
4133 I915_WRITE16(HWSTAM, 0xffff);
4134 for_each_pipe(dev_priv, pipe) {
4135 /* Clear enable bits; then clear status bits */
4136 I915_WRITE(PIPESTAT(pipe), 0);
4137 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4138 }
4139 I915_WRITE(IMR, 0xffffffff);
4140 I915_WRITE(IER, 0x0);
4141
4142 I915_WRITE(IIR, I915_READ(IIR));
4143 }
4144
4145 static void i965_irq_preinstall(struct drm_device *dev)
4146 {
4147 struct drm_i915_private *dev_priv = dev->dev_private;
4148 int pipe;
4149
4150 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4151 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4152
4153 I915_WRITE(HWSTAM, 0xeffe);
4154 for_each_pipe(dev_priv, pipe)
4155 I915_WRITE(PIPESTAT(pipe), 0);
4156 I915_WRITE(IMR, 0xffffffff);
4157 I915_WRITE(IER, 0x0);
4158 POSTING_READ(IER);
4159 }
4160
4161 static int i965_irq_postinstall(struct drm_device *dev)
4162 {
4163 struct drm_i915_private *dev_priv = dev->dev_private;
4164 u32 enable_mask;
4165 u32 error_mask;
4166
4167 /* Unmask the interrupts that we always want on. */
4168 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4169 I915_DISPLAY_PORT_INTERRUPT |
4170 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4171 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4172 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4173 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4174 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4175
4176 enable_mask = ~dev_priv->irq_mask;
4177 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4178 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4179 enable_mask |= I915_USER_INTERRUPT;
4180
4181 if (IS_G4X(dev))
4182 enable_mask |= I915_BSD_USER_INTERRUPT;
4183
4184 /* Interrupt setup is already guaranteed to be single-threaded; this is
4185 * just to make the assert_spin_locked check happy. */
4186 spin_lock_irq(&dev_priv->irq_lock);
4187 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4188 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4189 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4190 spin_unlock_irq(&dev_priv->irq_lock);
4191
4192 /*
4193 * Enable some error detection; note that the instruction error mask
4194 * bit is reserved, so we leave it masked.
4195 */
4196 if (IS_G4X(dev)) {
4197 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4198 GM45_ERROR_MEM_PRIV |
4199 GM45_ERROR_CP_PRIV |
4200 I915_ERROR_MEMORY_REFRESH);
4201 } else {
4202 error_mask = ~(I915_ERROR_PAGE_TABLE |
4203 I915_ERROR_MEMORY_REFRESH);
4204 }
4205 I915_WRITE(EMR, error_mask);
4206
4207 I915_WRITE(IMR, dev_priv->irq_mask);
4208 I915_WRITE(IER, enable_mask);
4209 POSTING_READ(IER);
4210
4211 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4212 POSTING_READ(PORT_HOTPLUG_EN);
4213
4214 i915_enable_asle_pipestat(dev);
4215
4216 return 0;
4217 }
4218
4219 static void i915_hpd_irq_setup(struct drm_device *dev)
4220 {
4221 struct drm_i915_private *dev_priv = dev->dev_private;
4222 u32 hotplug_en;
4223
4224 assert_spin_locked(&dev_priv->irq_lock);
4225
4226 /* Note HDMI and DP share hotplug bits; the enable bits are the
4227 * same for all generations. */
4228 hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4229 /* Programming the CRT detection parameters tends to
4230 * generate a spurious hotplug event about three
4231 * seconds later. So just do it once.
4232 */
4233 if (IS_G4X(dev))
4234 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4235 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4236
4237 /* Ignore TV since it's buggy */
4238 i915_hotplug_interrupt_update_locked(dev_priv,
4239 HOTPLUG_INT_EN_MASK |
4240 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4241 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4242 hotplug_en);
4243 }
4244
4245 static irqreturn_t i965_irq_handler(int irq, void *arg)
4246 {
4247 struct drm_device *dev = arg;
4248 struct drm_i915_private *dev_priv = dev->dev_private;
4249 u32 iir, new_iir;
4250 u32 pipe_stats[I915_MAX_PIPES];
4251 int ret = IRQ_NONE, pipe;
4252 u32 flip_mask =
4253 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4254 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4255
4256 if (!intel_irqs_enabled(dev_priv))
4257 return IRQ_NONE;
4258
4259 iir = I915_READ(IIR);
4260
4261 for (;;) {
4262 bool irq_received = (iir & ~flip_mask) != 0;
4263 bool blc_event = false;
4264
4265 /* Can't rely on pipestat interrupt bit in iir as it might
4266 * have been cleared after the pipestat interrupt was received.
4267 * It doesn't set the bit in iir again, but it still produces
4268 * interrupts (for non-MSI).
4269 */
4270 spin_lock(&dev_priv->irq_lock);
4271 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4272 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4273
4274 for_each_pipe(dev_priv, pipe) {
4275 int reg = PIPESTAT(pipe);
4276 pipe_stats[pipe] = I915_READ(reg);
4277
4278 /*
4279 * Clear the PIPE*STAT regs before the IIR
4280 */
4281 if (pipe_stats[pipe] & 0x8000ffff) {
4282 I915_WRITE(reg, pipe_stats[pipe]);
4283 irq_received = true;
4284 }
4285 }
4286 spin_unlock(&dev_priv->irq_lock);
4287
4288 if (!irq_received)
4289 break;
4290
4291 ret = IRQ_HANDLED;
4292
4293 /* Consume port. Then clear IIR or we'll miss events */
4294 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4295 i9xx_hpd_irq_handler(dev);
4296
4297 I915_WRITE(IIR, iir & ~flip_mask);
4298 new_iir = I915_READ(IIR); /* Flush posted writes */
4299
4300 if (iir & I915_USER_INTERRUPT)
4301 notify_ring(&dev_priv->ring[RCS]);
4302 if (iir & I915_BSD_USER_INTERRUPT)
4303 notify_ring(&dev_priv->ring[VCS]);
4304
4305 for_each_pipe(dev_priv, pipe) {
4306 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4307 i915_handle_vblank(dev, pipe, pipe, iir))
4308 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4309
4310 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4311 blc_event = true;
4312
4313 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4314 i9xx_pipe_crc_irq_handler(dev, pipe);
4315
4316 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4317 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4318 }
4319
4320 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4321 intel_opregion_asle_intr(dev);
4322
4323 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4324 gmbus_irq_handler(dev);
4325
4326 /* With MSI, interrupts are only generated when iir
4327 * transitions from zero to nonzero. If another bit got
4328 * set while we were handling the existing iir bits, then
4329 * we would never get another interrupt.
4330 *
4331 * This is fine on non-MSI as well, as if we hit this path
4332 * we avoid exiting the interrupt handler only to generate
4333 * another one.
4334 *
4335 * Note that for MSI this could cause a stray interrupt report
4336 * if an interrupt landed in the time between writing IIR and
4337 * the posting read. This should be rare enough to never
4338 * trigger the 99% of 100,000 interrupts test for disabling
4339 * stray interrupts.
4340 */
4341 iir = new_iir;
4342 }
4343
4344 return ret;
4345 }
4346
4347 static void i965_irq_uninstall(struct drm_device *dev)
4348 {
4349 struct drm_i915_private *dev_priv = dev->dev_private;
4350 int pipe;
4351
4352 if (!dev_priv)
4353 return;
4354
4355 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4356 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4357
4358 I915_WRITE(HWSTAM, 0xffffffff);
4359 for_each_pipe(dev_priv, pipe)
4360 I915_WRITE(PIPESTAT(pipe), 0);
4361 I915_WRITE(IMR, 0xffffffff);
4362 I915_WRITE(IER, 0x0);
4363
4364 for_each_pipe(dev_priv, pipe)
4365 I915_WRITE(PIPESTAT(pipe),
4366 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4367 I915_WRITE(IIR, I915_READ(IIR));
4368 }
4369
4370 /**
4371 * intel_irq_init - initializes irq support
4372 * @dev_priv: i915 device instance
4373 *
4374 * This function initializes all the irq support including work items, timers
4375 * and all the vtables. It does not set up the interrupt itself, though.
4376 */
4377 void intel_irq_init(struct drm_i915_private *dev_priv)
4378 {
4379 struct drm_device *dev = dev_priv->dev;
4380
4381 intel_hpd_init_work(dev_priv);
4382
4383 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4384 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4385
4386 /* Let's track the enabled rps events */
4387 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4388 /* WaGsvRC0ResidencyMethod:vlv */
4389 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4390 else
4391 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4392
4393 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4394 i915_hangcheck_elapsed);
4395
4396 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4397
4398 if (IS_GEN2(dev_priv)) {
4399 dev->max_vblank_count = 0;
4400 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4401 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4402 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4403 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4404 } else {
4405 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4406 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4407 }
4408
4409 /*
4410 * Opt out of the vblank disable timer on everything except gen2.
4411 * Gen2 doesn't have a hardware frame counter and so depends on
4412 * vblank interrupts to produce sane vblank sequence numbers.
4413 */
4414 if (!IS_GEN2(dev_priv))
4415 dev->vblank_disable_immediate = true;
4416
4417 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4418 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4419
4420 if (IS_CHERRYVIEW(dev_priv)) {
4421 dev->driver->irq_handler = cherryview_irq_handler;
4422 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4423 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4424 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4425 dev->driver->enable_vblank = valleyview_enable_vblank;
4426 dev->driver->disable_vblank = valleyview_disable_vblank;
4427 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4428 } else if (IS_VALLEYVIEW(dev_priv)) {
4429 dev->driver->irq_handler = valleyview_irq_handler;
4430 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4431 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4432 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4433 dev->driver->enable_vblank = valleyview_enable_vblank;
4434 dev->driver->disable_vblank = valleyview_disable_vblank;
4435 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4436 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4437 dev->driver->irq_handler = gen8_irq_handler;
4438 dev->driver->irq_preinstall = gen8_irq_reset;
4439 dev->driver->irq_postinstall = gen8_irq_postinstall;
4440 dev->driver->irq_uninstall = gen8_irq_uninstall;
4441 dev->driver->enable_vblank = gen8_enable_vblank;
4442 dev->driver->disable_vblank = gen8_disable_vblank;
4443 if (IS_BROXTON(dev))
4444 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4445 else if (HAS_PCH_SPT(dev))
4446 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4447 else
4448 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4449 } else if (HAS_PCH_SPLIT(dev)) {
4450 dev->driver->irq_handler = ironlake_irq_handler;
4451 dev->driver->irq_preinstall = ironlake_irq_reset;
4452 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4453 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4454 dev->driver->enable_vblank = ironlake_enable_vblank;
4455 dev->driver->disable_vblank = ironlake_disable_vblank;
4456 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4457 } else {
4458 if (INTEL_INFO(dev_priv)->gen == 2) {
4459 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4460 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4461 dev->driver->irq_handler = i8xx_irq_handler;
4462 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4463 } else if (INTEL_INFO(dev_priv)->gen == 3) {
4464 dev->driver->irq_preinstall = i915_irq_preinstall;
4465 dev->driver->irq_postinstall = i915_irq_postinstall;
4466 dev->driver->irq_uninstall = i915_irq_uninstall;
4467 dev->driver->irq_handler = i915_irq_handler;
4468 } else {
4469 dev->driver->irq_preinstall = i965_irq_preinstall;
4470 dev->driver->irq_postinstall = i965_irq_postinstall;
4471 dev->driver->irq_uninstall = i965_irq_uninstall;
4472 dev->driver->irq_handler = i965_irq_handler;
4473 }
4474 if (I915_HAS_HOTPLUG(dev_priv))
4475 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4476 dev->driver->enable_vblank = i915_enable_vblank;
4477 dev->driver->disable_vblank = i915_disable_vblank;
4478 }
4479 }
4480
4481 /**
4482 * intel_irq_install - enables the hardware interrupt
4483 * @dev_priv: i915 device instance
4484 *
4485 * This function enables the hardware interrupt handling, but leaves the hotplug
4486 * handling disabled. It is called after intel_irq_init().
4487 *
4488 * In the driver load and resume code we need working interrupts in a few places
4489 * but don't want to deal with the hassle of concurrent probe and hotplug
4490 * workers. Hence the split into this two-stage approach.
4491 */
4492 int intel_irq_install(struct drm_i915_private *dev_priv)
4493 {
4494 /*
4495 * We enable some interrupt sources in our postinstall hooks, so mark
4496 * interrupts as enabled _before_ actually enabling them to avoid
4497 * special cases in our ordering checks.
4498 */
4499 dev_priv->pm.irqs_enabled = true;
4500
4501 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4502 }
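/*
 * A minimal usage sketch of the two-stage approach described above,
 * assuming a simplified driver load path (the surrounding steps and
 * the intel_hpd_init() call are assumptions, not taken from this
 * file):
 *
 *	intel_irq_init(dev_priv);		// work items, timers, vtables
 *	ret = intel_irq_install(dev_priv);	// enable the hardware irq
 *	if (ret)
 *		return ret;
 *	intel_hpd_init(dev_priv);		// hotplug handling last
 */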
4503
4504 /**
4505 * intel_irq_uninstall - finalizes all irq handling
4506 * @dev_priv: i915 device instance
4507 *
4508 * This stops interrupt and hotplug handling and unregisters and frees all
4509 * resources acquired in the init functions.
4510 */
4511 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4512 {
4513 drm_irq_uninstall(dev_priv->dev);
4514 intel_hpd_cancel_work(dev_priv);
4515 dev_priv->pm.irqs_enabled = false;
4516 }
4517
4518 /**
4519 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4520 * @dev_priv: i915 device instance
4521 *
4522 * This function is used to disable interrupts at runtime, both in the runtime
4523 * pm and the system suspend/resume code.
4524 */
4525 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4526 {
4527 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4528 dev_priv->pm.irqs_enabled = false;
4529 synchronize_irq(dev_priv->dev->irq);
4530 }
4531
4532 /**
4533 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4534 * @dev_priv: i915 device instance
4535 *
4536 * This function is used to enable interrupts at runtime, both in the runtime
4537 * pm and the system suspend/resume code.
4538 */
4539 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4540 {
4541 dev_priv->pm.irqs_enabled = true;
4542 dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4543 dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4544 }
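/*
 * A minimal sketch of how the two runtime pm helpers above are
 * typically paired, assuming simplified suspend/resume callbacks (the
 * function names and the elided steps are illustrative, not from this
 * file):
 *
 *	static int example_runtime_suspend(struct drm_i915_private *dev_priv)
 *	{
 *		intel_runtime_pm_disable_interrupts(dev_priv);
 *		// ...save state, power the device down...
 *		return 0;
 *	}
 *
 *	static int example_runtime_resume(struct drm_i915_private *dev_priv)
 *	{
 *		// ...power the device up, restore state...
 *		intel_runtime_pm_enable_interrupts(dev_priv);
 *		return 0;
 *	}
 */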