/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

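/*
 * Illustrative expansion (for reference only, not part of the driver):
 * with the token pasting above, GEN5_IRQ_RESET(GT) becomes
 *
 *	I915_WRITE(GTIMR, 0xffffffff);
 *	POSTING_READ(GTIMR);
 *	I915_WRITE(GTIER, 0);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *
 * i.e. mask everything, disable everything, then clear IIR twice in case
 * a second event was queued behind the one being cleared.
 */
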
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

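/*
 * Illustrative use (not a real call site): GEN5_IRQ_INIT(GT, imr_val, ier_val)
 * first asserts GTIIR is already clear, then programs GTIER before GTIMR, so
 * no stale event can fire while the enable and mask bits are being set up.
 */
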
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To prevent concurrent read-modify-write
 * cycles from interfering, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where
 * the lock is held already, this function acquires the lock itself.
 * A non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

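/*
 * Note: on gen8+ the PM interrupt bits live in the third GT interrupt bank
 * (GEN8_GT_IIR/IMR/IER(2)) rather than in the dedicated GEN6_PM* registers;
 * the helpers above hide that difference from the callers below.
 */
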
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
		   dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	return (mask & ~dev_priv->rps.pm_intr_keep);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
		   ~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

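/*
 * Note: in PIPESTAT the enable bits occupy the high 16 bits, mirroring the
 * status bits in the low 16, hence 'status_mask << 16' in the common case;
 * the VLV/CHV sprite flip done and PSR bits handled above are the
 * exceptions that break this symmetry.
 */
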
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

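/*
 * Worked example (illustrative numbers only): if vbl_start converts to
 * pixel 100000 and we sample frame N with the pixel counter at 100123,
 * scanout has already crossed vblank start, so N + 1 is returned --
 * mimicking a counter that increments at start of vblank rather than at
 * start of active.
 */
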
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squeak at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. To prevent the
		 * reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}

static void notify_ring(struct intel_engine_cs *engine)
{
	smp_store_mb(engine->breadcrumbs.irq_posted, true);
	if (intel_engine_wakeup(engine))
		trace_i915_gem_request_notify(engine);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;
	unsigned int mul = 100;

	if (old->cz_clock == 0)
		return false;

	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
		mul <<= 8;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->czclk_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

	return c0 >= time;
}

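/*
 * In effect vlv_c0_above() checks, cross-multiplied to stay in integer
 * arithmetic, whether
 *
 *	(render_c0 + media_c0) / elapsed_time >= threshold / 100
 *
 * with 'mul' carrying the percentage scaling (plus an extra <<8 when the
 * counters run in high-range mode).
 */
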
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		if (intel_engine_has_waiter(engine))
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancellation while disabling RPS interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;
	if (client_boost || any_waiters(dev_priv))
		max = dev_priv->rps.max_freq;
	if (client_boost && new_delay < dev_priv->rps.boost_freq) {
		new_delay = dev_priv->rps.boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (client_boost || any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
		notify_ring(engine);
	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
		tasklet_schedule(&engine->irq_tasklet);
}

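/*
 * Each GEN8 GT IIR bank packs the bits for two engines, the second copy
 * shifted up by 16 (e.g. RCS at GEN8_RCS_IRQ_SHIFT and BCS at
 * GEN8_BCS_IRQ_SHIFT within IIR(0)), which is why gen8_cs_irq_handler()
 * takes the shift as a parameter.
 */
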
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      gt_iir[2] & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				u32 gt_iir[4])
{
	if (gt_iir[0]) {
		gen8_cs_irq_handler(dev_priv->engine[RCS],
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(dev_priv->engine[BCS],
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (gt_iir[1]) {
		gen8_cs_irq_handler(dev_priv->engine[VCS],
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(dev_priv->engine[VCS2],
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (gt_iir[3])
		gen8_cs_irq_handler(dev_priv->engine[VECS],
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

	if (gt_iir[2] & dev_priv->pm_rps_events)
		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
}

static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
								 pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	bool ret;

	ret = drm_handle_vblank(&dev_priv->drm, pipe);
	if (ret)
		intel_finish_page_flip_mmio(dev_priv, pipe);

	return ret;
}

static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status)
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
		POSTING_READ(VLV_MASTER_IER);

		if (gt_iir)
			snb_gt_irq_handler(dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

1827 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1828 {
1829 struct drm_device *dev = arg;
1830 struct drm_i915_private *dev_priv = to_i915(dev);
1831 irqreturn_t ret = IRQ_NONE;
1832
1833 if (!intel_irqs_enabled(dev_priv))
1834 return IRQ_NONE;
1835
1836 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
1837 disable_rpm_wakeref_asserts(dev_priv);
1838
1839 do {
1840 u32 master_ctl, iir;
1841 u32 gt_iir[4] = {};
1842 u32 pipe_stats[I915_MAX_PIPES] = {};
1843 u32 hotplug_status = 0;
1844 u32 ier = 0;
1845
1846 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1847 iir = I915_READ(VLV_IIR);
1848
1849 if (master_ctl == 0 && iir == 0)
1850 break;
1851
1852 ret = IRQ_HANDLED;
1853
1854 /*
1855 * Theory on interrupt generation, based on empirical evidence:
1856 *
1857 * x = ((VLV_IIR & VLV_IER) ||
1858 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1859 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1860 *
1861 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1862 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1863 * guarantee the CPU interrupt will be raised again even if we
1864 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ
1865 * bits this time around.
1866 */
1867 I915_WRITE(GEN8_MASTER_IRQ, 0);
1868 ier = I915_READ(VLV_IER);
1869 I915_WRITE(VLV_IER, 0);
1870
1871 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
1872
1873 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1874 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1875
1876 /* Call regardless, as some status bits might not be
1877 * signalled in iir */
1878 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1879
1880 /*
1881 * VLV_IIR is single buffered, and reflects the level
1882 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1883 */
1884 if (iir)
1885 I915_WRITE(VLV_IIR, iir);
1886
1887 I915_WRITE(VLV_IER, ier);
1888 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1889 POSTING_READ(GEN8_MASTER_IRQ);
1890
1891 gen8_gt_irq_handler(dev_priv, gt_iir);
1892
1893 if (hotplug_status)
1894 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1895
1896 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1897 } while (0);
1898
1899 enable_rpm_wakeref_asserts(dev_priv);
1900
1901 return ret;
1902 }
1903
1904 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1905 u32 hotplug_trigger,
1906 const u32 hpd[HPD_NUM_PINS])
1907 {
1908 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1909
1910 /*
1911 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1912 * unless we touch the hotplug register, even if hotplug_trigger is
1913 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1914 * errors.
1915 */
1916 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1917 if (!hotplug_trigger) {
1918 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1919 PORTD_HOTPLUG_STATUS_MASK |
1920 PORTC_HOTPLUG_STATUS_MASK |
1921 PORTB_HOTPLUG_STATUS_MASK;
1922 dig_hotplug_reg &= ~mask;
1923 }
1924
1925 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1926 if (!hotplug_trigger)
1927 return;
1928
1929 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1930 dig_hotplug_reg, hpd,
1931 pch_port_hotplug_long_detect);
1932
1933 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1934 }
1935
1936 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1937 {
1938 int pipe;
1939 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1940
1941 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
1942
1943 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1944 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1945 SDE_AUDIO_POWER_SHIFT);
1946 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1947 port_name(port));
1948 }
1949
1950 if (pch_iir & SDE_AUX_MASK)
1951 dp_aux_irq_handler(dev_priv);
1952
1953 if (pch_iir & SDE_GMBUS)
1954 gmbus_irq_handler(dev_priv);
1955
1956 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1957 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1958
1959 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1960 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1961
1962 if (pch_iir & SDE_POISON)
1963 DRM_ERROR("PCH poison interrupt\n");
1964
1965 if (pch_iir & SDE_FDI_MASK)
1966 for_each_pipe(dev_priv, pipe)
1967 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1968 pipe_name(pipe),
1969 I915_READ(FDI_RX_IIR(pipe)));
1970
1971 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1972 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1973
1974 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1975 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1976
1977 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1978 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1979
1980 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1981 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1982 }
1983
1984 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1985 {
1986 u32 err_int = I915_READ(GEN7_ERR_INT);
1987 enum pipe pipe;
1988
1989 if (err_int & ERR_INT_POISON)
1990 DRM_ERROR("Poison interrupt\n");
1991
1992 for_each_pipe(dev_priv, pipe) {
1993 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1994 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1995
1996 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1997 if (IS_IVYBRIDGE(dev_priv))
1998 ivb_pipe_crc_irq_handler(dev_priv, pipe);
1999 else
2000 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2001 }
2002 }
2003
2004 I915_WRITE(GEN7_ERR_INT, err_int);
2005 }
2006
2007 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2008 {
2009 u32 serr_int = I915_READ(SERR_INT);
2010
2011 if (serr_int & SERR_INT_POISON)
2012 DRM_ERROR("PCH poison interrupt\n");
2013
2014 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2015 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2016
2017 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2018 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2019
2020 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2021 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
2022
2023 I915_WRITE(SERR_INT, serr_int);
2024 }
2025
2026 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2027 {
2028 int pipe;
2029 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2030
2031 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2032
2033 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2034 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2035 SDE_AUDIO_POWER_SHIFT_CPT);
2036 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2037 port_name(port));
2038 }
2039
2040 if (pch_iir & SDE_AUX_MASK_CPT)
2041 dp_aux_irq_handler(dev_priv);
2042
2043 if (pch_iir & SDE_GMBUS_CPT)
2044 gmbus_irq_handler(dev_priv);
2045
2046 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2047 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2048
2049 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2050 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2051
2052 if (pch_iir & SDE_FDI_MASK_CPT)
2053 for_each_pipe(dev_priv, pipe)
2054 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2055 pipe_name(pipe),
2056 I915_READ(FDI_RX_IIR(pipe)));
2057
2058 if (pch_iir & SDE_ERROR_CPT)
2059 cpt_serr_int_handler(dev_priv);
2060 }
2061
2062 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2063 {
2064 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2065 ~SDE_PORTE_HOTPLUG_SPT;
2066 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2067 u32 pin_mask = 0, long_mask = 0;
2068
2069 if (hotplug_trigger) {
2070 u32 dig_hotplug_reg;
2071
2072 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2073 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2074
2075 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2076 dig_hotplug_reg, hpd_spt,
2077 spt_port_hotplug_long_detect);
2078 }
2079
2080 if (hotplug2_trigger) {
2081 u32 dig_hotplug_reg;
2082
2083 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2084 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2085
2086 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
2087 dig_hotplug_reg, hpd_spt,
2088 spt_port_hotplug2_long_detect);
2089 }
2090
2091 if (pin_mask)
2092 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2093
2094 if (pch_iir & SDE_GMBUS_CPT)
2095 gmbus_irq_handler(dev_priv);
2096 }
2097
2098 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2099 u32 hotplug_trigger,
2100 const u32 hpd[HPD_NUM_PINS])
2101 {
2102 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2103
2104 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2105 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2106
2107 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2108 dig_hotplug_reg, hpd,
2109 ilk_port_hotplug_long_detect);
2110
2111 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2112 }
2113
2114 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2115 u32 de_iir)
2116 {
2117 enum pipe pipe;
2118 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2119
2120 if (hotplug_trigger)
2121 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2122
2123 if (de_iir & DE_AUX_CHANNEL_A)
2124 dp_aux_irq_handler(dev_priv);
2125
2126 if (de_iir & DE_GSE)
2127 intel_opregion_asle_intr(dev_priv);
2128
2129 if (de_iir & DE_POISON)
2130 DRM_ERROR("Poison interrupt\n");
2131
2132 for_each_pipe(dev_priv, pipe) {
2133 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2134 intel_pipe_handle_vblank(dev_priv, pipe))
2135 intel_check_page_flip(dev_priv, pipe);
2136
2137 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2138 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2139
2140 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2141 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2142
2143 /* plane/pipes map 1:1 on ilk+ */
2144 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2145 intel_finish_page_flip_cs(dev_priv, pipe);
2146 }
2147
2148 /* check event from PCH */
2149 if (de_iir & DE_PCH_EVENT) {
2150 u32 pch_iir = I915_READ(SDEIIR);
2151
2152 if (HAS_PCH_CPT(dev_priv))
2153 cpt_irq_handler(dev_priv, pch_iir);
2154 else
2155 ibx_irq_handler(dev_priv, pch_iir);
2156
2157 		/* clear the PCH hotplug event before clearing the CPU irq */
2158 I915_WRITE(SDEIIR, pch_iir);
2159 }
2160
2161 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2162 ironlake_rps_change_irq_handler(dev_priv);
2163 }
2164
2165 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2166 u32 de_iir)
2167 {
2168 enum pipe pipe;
2169 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2170
2171 if (hotplug_trigger)
2172 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2173
2174 if (de_iir & DE_ERR_INT_IVB)
2175 ivb_err_int_handler(dev_priv);
2176
2177 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2178 dp_aux_irq_handler(dev_priv);
2179
2180 if (de_iir & DE_GSE_IVB)
2181 intel_opregion_asle_intr(dev_priv);
2182
2183 for_each_pipe(dev_priv, pipe) {
2184 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2185 intel_pipe_handle_vblank(dev_priv, pipe))
2186 intel_check_page_flip(dev_priv, pipe);
2187
2188 /* plane/pipes map 1:1 on ilk+ */
2189 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2190 intel_finish_page_flip_cs(dev_priv, pipe);
2191 }
2192
2193 /* check event from PCH */
2194 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2195 u32 pch_iir = I915_READ(SDEIIR);
2196
2197 cpt_irq_handler(dev_priv, pch_iir);
2198
2199 		/* clear the PCH hotplug event before clearing the CPU irq */
2200 I915_WRITE(SDEIIR, pch_iir);
2201 }
2202 }
2203
2204 /*
2205 * To handle irqs with the minimum potential races with fresh interrupts, we:
2206 * 1 - Disable Master Interrupt Control.
2207 * 2 - Find the source(s) of the interrupt.
2208 * 3 - Clear the Interrupt Identity bits (IIR).
2209 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2210 * 5 - Re-enable Master Interrupt Control.
2211 */
2212 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2213 {
2214 struct drm_device *dev = arg;
2215 struct drm_i915_private *dev_priv = to_i915(dev);
2216 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2217 irqreturn_t ret = IRQ_NONE;
2218
2219 if (!intel_irqs_enabled(dev_priv))
2220 return IRQ_NONE;
2221
2222 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
2223 disable_rpm_wakeref_asserts(dev_priv);
2224
2225 /* disable master interrupt before clearing iir */
2226 de_ier = I915_READ(DEIER);
2227 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2228 POSTING_READ(DEIER);
2229
2230 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2231 	 * interrupts will be stored on its back queue, and then we'll be
2232 * able to process them after we restore SDEIER (as soon as we restore
2233 * it, we'll get an interrupt if SDEIIR still has something to process
2234 * due to its back queue). */
2235 if (!HAS_PCH_NOP(dev_priv)) {
2236 sde_ier = I915_READ(SDEIER);
2237 I915_WRITE(SDEIER, 0);
2238 POSTING_READ(SDEIER);
2239 }
2240
2241 /* Find, clear, then process each source of interrupt */
2242
2243 gt_iir = I915_READ(GTIIR);
2244 if (gt_iir) {
2245 I915_WRITE(GTIIR, gt_iir);
2246 ret = IRQ_HANDLED;
2247 if (INTEL_GEN(dev_priv) >= 6)
2248 snb_gt_irq_handler(dev_priv, gt_iir);
2249 else
2250 ilk_gt_irq_handler(dev_priv, gt_iir);
2251 }
2252
2253 de_iir = I915_READ(DEIIR);
2254 if (de_iir) {
2255 I915_WRITE(DEIIR, de_iir);
2256 ret = IRQ_HANDLED;
2257 if (INTEL_GEN(dev_priv) >= 7)
2258 ivb_display_irq_handler(dev_priv, de_iir);
2259 else
2260 ilk_display_irq_handler(dev_priv, de_iir);
2261 }
2262
2263 if (INTEL_GEN(dev_priv) >= 6) {
2264 u32 pm_iir = I915_READ(GEN6_PMIIR);
2265 if (pm_iir) {
2266 I915_WRITE(GEN6_PMIIR, pm_iir);
2267 ret = IRQ_HANDLED;
2268 gen6_rps_irq_handler(dev_priv, pm_iir);
2269 }
2270 }
2271
2272 I915_WRITE(DEIER, de_ier);
2273 POSTING_READ(DEIER);
2274 if (!HAS_PCH_NOP(dev_priv)) {
2275 I915_WRITE(SDEIER, sde_ier);
2276 POSTING_READ(SDEIER);
2277 }
2278
2279 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
2280 enable_rpm_wakeref_asserts(dev_priv);
2281
2282 return ret;
2283 }
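
/*
 * Editorial sketch, not driver code: the five-step strategy above,
 * reduced to its skeleton. The register-as-pointer model and the helper
 * name are hypothetical, and the write-1-to-clear IIR semantics of the
 * real hardware are only mimicked by the comment on step 3. The
 * load-bearing part is the ordering, which bounds the race window with
 * interrupts that arrive while the handler runs.
 */
static inline irqreturn_t ilk_style_irq_skeleton(u32 *master, u32 *iir_reg,
						 void (*process)(u32 iir))
{
	irqreturn_t ret = IRQ_NONE;
	u32 saved_ier = *master;
	u32 iir;

	*master = saved_ier & ~DE_MASTER_IRQ_CONTROL;	/* 1: mask master */
	iir = *iir_reg;					/* 2: find source(s) */
	if (iir) {
		*iir_reg = iir;		/* 3: ack (write-1-to-clear on hw) */
		process(iir);		/* 4: process the latched bits */
		ret = IRQ_HANDLED;
	}
	*master = saved_ier;				/* 5: unmask master */

	return ret;
}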
2284
2285 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2286 u32 hotplug_trigger,
2287 const u32 hpd[HPD_NUM_PINS])
2288 {
2289 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2290
2291 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2292 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2293
2294 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2295 dig_hotplug_reg, hpd,
2296 bxt_port_hotplug_long_detect);
2297
2298 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2299 }
2300
2301 static irqreturn_t
2302 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2303 {
2304 irqreturn_t ret = IRQ_NONE;
2305 u32 iir;
2306 enum pipe pipe;
2307
2308 if (master_ctl & GEN8_DE_MISC_IRQ) {
2309 iir = I915_READ(GEN8_DE_MISC_IIR);
2310 if (iir) {
2311 I915_WRITE(GEN8_DE_MISC_IIR, iir);
2312 ret = IRQ_HANDLED;
2313 if (iir & GEN8_DE_MISC_GSE)
2314 intel_opregion_asle_intr(dev_priv);
2315 else
2316 DRM_ERROR("Unexpected DE Misc interrupt\n");
2317 		} else
2319 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2320 }
2321
2322 if (master_ctl & GEN8_DE_PORT_IRQ) {
2323 iir = I915_READ(GEN8_DE_PORT_IIR);
2324 if (iir) {
2325 u32 tmp_mask;
2326 bool found = false;
2327
2328 I915_WRITE(GEN8_DE_PORT_IIR, iir);
2329 ret = IRQ_HANDLED;
2330
2331 tmp_mask = GEN8_AUX_CHANNEL_A;
2332 if (INTEL_INFO(dev_priv)->gen >= 9)
2333 tmp_mask |= GEN9_AUX_CHANNEL_B |
2334 GEN9_AUX_CHANNEL_C |
2335 GEN9_AUX_CHANNEL_D;
2336
2337 if (iir & tmp_mask) {
2338 dp_aux_irq_handler(dev_priv);
2339 found = true;
2340 }
2341
2342 if (IS_BROXTON(dev_priv)) {
2343 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2344 if (tmp_mask) {
2345 bxt_hpd_irq_handler(dev_priv, tmp_mask,
2346 hpd_bxt);
2347 found = true;
2348 }
2349 } else if (IS_BROADWELL(dev_priv)) {
2350 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2351 if (tmp_mask) {
2352 ilk_hpd_irq_handler(dev_priv,
2353 tmp_mask, hpd_bdw);
2354 found = true;
2355 }
2356 }
2357
2358 if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2359 gmbus_irq_handler(dev_priv);
2360 found = true;
2361 }
2362
2363 if (!found)
2364 DRM_ERROR("Unexpected DE Port interrupt\n");
2365 		} else
2367 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2368 }
2369
2370 for_each_pipe(dev_priv, pipe) {
2371 u32 flip_done, fault_errors;
2372
2373 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2374 continue;
2375
2376 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2377 if (!iir) {
2378 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2379 continue;
2380 }
2381
2382 ret = IRQ_HANDLED;
2383 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2384
2385 if (iir & GEN8_PIPE_VBLANK &&
2386 intel_pipe_handle_vblank(dev_priv, pipe))
2387 intel_check_page_flip(dev_priv, pipe);
2388
2389 flip_done = iir;
2390 if (INTEL_INFO(dev_priv)->gen >= 9)
2391 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
2392 else
2393 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2394
2395 if (flip_done)
2396 intel_finish_page_flip_cs(dev_priv, pipe);
2397
2398 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2399 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2400
2401 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2402 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2403
2404 fault_errors = iir;
2405 if (INTEL_INFO(dev_priv)->gen >= 9)
2406 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2407 else
2408 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2409
2410 if (fault_errors)
2411 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2412 pipe_name(pipe),
2413 fault_errors);
2414 }
2415
2416 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2417 master_ctl & GEN8_DE_PCH_IRQ) {
2418 /*
2419 * FIXME(BDW): Assume for now that the new interrupt handling
2420 * scheme also closed the SDE interrupt handling race we've seen
2421 * on older pch-split platforms. But this needs testing.
2422 */
2423 iir = I915_READ(SDEIIR);
2424 if (iir) {
2425 I915_WRITE(SDEIIR, iir);
2426 ret = IRQ_HANDLED;
2427
2428 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
2429 spt_irq_handler(dev_priv, iir);
2430 else
2431 cpt_irq_handler(dev_priv, iir);
2432 } else {
2433 /*
2434 * Like on previous PCH there seems to be something
2435 * fishy going on with forwarding PCH interrupts.
2436 */
2437 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2438 }
2439 }
2440
2441 return ret;
2442 }
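
/*
 * The "master control interrupt lied" messages above all detect the
 * same condition. A hedged one-liner of that check (names hypothetical):
 * the master register flagged a domain, but the domain's own IIR had
 * nothing to ack.
 */
static inline bool gen8_master_lied(u32 master_ctl, u32 domain_bit,
				    u32 domain_iir)
{
	return (master_ctl & domain_bit) && !domain_iir;
}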
2443
2444 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2445 {
2446 struct drm_device *dev = arg;
2447 struct drm_i915_private *dev_priv = to_i915(dev);
2448 u32 master_ctl;
2449 u32 gt_iir[4] = {};
2450 irqreturn_t ret;
2451
2452 if (!intel_irqs_enabled(dev_priv))
2453 return IRQ_NONE;
2454
2455 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2456 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2457 if (!master_ctl)
2458 return IRQ_NONE;
2459
2460 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2461
2462 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
2463 disable_rpm_wakeref_asserts(dev_priv);
2464
2465 /* Find, clear, then process each source of interrupt */
2466 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2467 gen8_gt_irq_handler(dev_priv, gt_iir);
2468 ret |= gen8_de_irq_handler(dev_priv, master_ctl);
2469
2470 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2471 POSTING_READ_FW(GEN8_MASTER_IRQ);
2472
2473 enable_rpm_wakeref_asserts(dev_priv);
2474
2475 return ret;
2476 }
2477
2478 static void i915_error_wake_up(struct drm_i915_private *dev_priv)
2479 {
2480 /*
2481 * Notify all waiters for GPU completion events that reset state has
2482 * been changed, and that they need to restart their wait after
2483 * checking for potential errors (and bail out to drop locks if there is
2484 * a gpu reset pending so that i915_error_work_func can acquire them).
2485 */
2486
2487 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2488 wake_up_all(&dev_priv->gpu_error.wait_queue);
2489
2490 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2491 wake_up_all(&dev_priv->pending_flip_queue);
2492 }
2493
2494 /**
2495 * i915_reset_and_wakeup - do process context error handling work
2496 * @dev_priv: i915 device private
2497 *
2498 * Fire an error uevent so userspace can see that a hang or error
2499 * was detected.
2500 */
2501 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
2502 {
2503 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
2504 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2505 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2506 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2507
2508 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
2509
2510 DRM_DEBUG_DRIVER("resetting chip\n");
2511 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
2512
2513 /*
2514 * In most cases it's guaranteed that we get here with an RPM
2515 * reference held, for example because there is a pending GPU
2516 * request that won't finish until the reset is done. This
2517 * isn't the case at least when we get here by doing a
2518 	 * simulated reset via debugfs, so get an RPM reference.
2519 */
2520 intel_runtime_pm_get(dev_priv);
2521 intel_prepare_reset(dev_priv);
2522
2523 do {
2524 /*
2525 * All state reset _must_ be completed before we update the
2526 * reset counter, for otherwise waiters might miss the reset
2527 * pending state and not properly drop locks, resulting in
2528 * deadlocks with the reset work.
2529 */
2530 if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
2531 i915_reset(dev_priv);
2532 mutex_unlock(&dev_priv->drm.struct_mutex);
2533 }
2534
2535 /* We need to wait for anyone holding the lock to wakeup */
2536 } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
2537 I915_RESET_IN_PROGRESS,
2538 TASK_UNINTERRUPTIBLE,
2539 HZ));
2540
2541 intel_finish_reset(dev_priv);
2542 intel_runtime_pm_put(dev_priv);
2543
2544 if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
2545 kobject_uevent_env(kobj,
2546 KOBJ_CHANGE, reset_done_event);
2547
2548 /*
2549 * Note: The wake_up also serves as a memory barrier so that
2550 * waiters see the updated value of the dev_priv->gpu_error.
2551 */
2552 wake_up_all(&dev_priv->gpu_error.reset_queue);
2553 }
2554
2555 static inline void
2556 i915_err_print_instdone(struct drm_i915_private *dev_priv,
2557 struct intel_instdone *instdone)
2558 {
2559 int slice;
2560 int subslice;
2561
2562 pr_err(" INSTDONE: 0x%08x\n", instdone->instdone);
2563
2564 if (INTEL_GEN(dev_priv) <= 3)
2565 return;
2566
2567 pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common);
2568
2569 if (INTEL_GEN(dev_priv) <= 6)
2570 return;
2571
2572 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
2573 pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
2574 slice, subslice, instdone->sampler[slice][subslice]);
2575
2576 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
2577 pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n",
2578 slice, subslice, instdone->row[slice][subslice]);
2579 }
2580
2581 static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
2582 {
2583 struct intel_instdone instdone;
2584 u32 eir = I915_READ(EIR);
2585 int pipe;
2586
2587 if (!eir)
2588 return;
2589
2590 pr_err("render error detected, EIR: 0x%08x\n", eir);
2591
2592 intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
2593
2594 if (IS_G4X(dev_priv)) {
2595 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2596 u32 ipeir = I915_READ(IPEIR_I965);
2597
2598 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2599 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2600 i915_err_print_instdone(dev_priv, &instdone);
2601 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2602 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2603 I915_WRITE(IPEIR_I965, ipeir);
2604 POSTING_READ(IPEIR_I965);
2605 }
2606 if (eir & GM45_ERROR_PAGE_TABLE) {
2607 u32 pgtbl_err = I915_READ(PGTBL_ER);
2608 pr_err("page table error\n");
2609 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2610 I915_WRITE(PGTBL_ER, pgtbl_err);
2611 POSTING_READ(PGTBL_ER);
2612 }
2613 }
2614
2615 if (!IS_GEN2(dev_priv)) {
2616 if (eir & I915_ERROR_PAGE_TABLE) {
2617 u32 pgtbl_err = I915_READ(PGTBL_ER);
2618 pr_err("page table error\n");
2619 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2620 I915_WRITE(PGTBL_ER, pgtbl_err);
2621 POSTING_READ(PGTBL_ER);
2622 }
2623 }
2624
2625 if (eir & I915_ERROR_MEMORY_REFRESH) {
2626 pr_err("memory refresh error:\n");
2627 for_each_pipe(dev_priv, pipe)
2628 pr_err("pipe %c stat: 0x%08x\n",
2629 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2630 /* pipestat has already been acked */
2631 }
2632 if (eir & I915_ERROR_INSTRUCTION) {
2633 pr_err("instruction error\n");
2634 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2635 i915_err_print_instdone(dev_priv, &instdone);
2636 if (INTEL_GEN(dev_priv) < 4) {
2637 u32 ipeir = I915_READ(IPEIR);
2638
2639 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2640 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2641 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2642 I915_WRITE(IPEIR, ipeir);
2643 POSTING_READ(IPEIR);
2644 } else {
2645 u32 ipeir = I915_READ(IPEIR_I965);
2646
2647 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2648 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2649 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2650 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2651 I915_WRITE(IPEIR_I965, ipeir);
2652 POSTING_READ(IPEIR_I965);
2653 }
2654 }
2655
2656 I915_WRITE(EIR, eir);
2657 POSTING_READ(EIR);
2658 eir = I915_READ(EIR);
2659 if (eir) {
2660 /*
2661 * some errors might have become stuck,
2662 * mask them.
2663 */
2664 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2665 I915_WRITE(EMR, I915_READ(EMR) | eir);
2666 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2667 }
2668 }
2669
2670 /**
2671 * i915_handle_error - handle a gpu error
2672 * @dev_priv: i915 device private
2673 * @engine_mask: mask representing engines that are hung
2674  * @fmt: Error message format string
2675  *
2676  * Do some basic checking of register state at error time and dump it
2677  * to the syslog. Also call i915_capture_error_state() to make sure we
2678  * get a record and make it available in debugfs. Fire a uevent so
2679  * userspace knows something bad happened (should trigger collection of a ring dump etc.).
2680 */
2681 void i915_handle_error(struct drm_i915_private *dev_priv,
2682 u32 engine_mask,
2683 const char *fmt, ...)
2684 {
2685 va_list args;
2686 char error_msg[80];
2687
2688 va_start(args, fmt);
2689 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2690 va_end(args);
2691
2692 i915_capture_error_state(dev_priv, engine_mask, error_msg);
2693 i915_report_and_clear_eir(dev_priv);
2694
2695 if (!engine_mask)
2696 return;
2697
2698 if (test_and_set_bit(I915_RESET_IN_PROGRESS,
2699 &dev_priv->gpu_error.flags))
2700 return;
2701
2702 /*
2703 * Wakeup waiting processes so that the reset function
2704 * i915_reset_and_wakeup doesn't deadlock trying to grab
2705 * various locks. By bumping the reset counter first, the woken
2706 * processes will see a reset in progress and back off,
2707 * releasing their locks and then wait for the reset completion.
2708 * We must do this for _all_ gpu waiters that might hold locks
2709 * that the reset work needs to acquire.
2710 *
2711 * Note: The wake_up also provides a memory barrier to ensure that the
2712 * waiters see the updated value of the reset flags.
2713 */
2714 i915_error_wake_up(dev_priv);
2715
2716 i915_reset_and_wakeup(dev_priv);
2717 }
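
/*
 * Sketch of the admission gate used above (hypothetical wrapper, for
 * illustration only): test_and_set_bit() is an atomic read-modify-write,
 * so exactly one of several concurrent error reporters observes the old
 * bit value as 0 and proceeds to the reset path; the rest return early.
 */
static inline bool i915_try_begin_reset(unsigned long *error_flags)
{
	/* Returns true only for the caller that won the race. */
	return !test_and_set_bit(I915_RESET_IN_PROGRESS, error_flags);
}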
2718
2719 /* Called from drm generic code, passed 'crtc' which
2720 * we use as a pipe index
2721 */
2722 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
2723 {
2724 struct drm_i915_private *dev_priv = to_i915(dev);
2725 unsigned long irqflags;
2726
2727 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2728 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2729 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2730
2731 return 0;
2732 }
2733
2734 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
2735 {
2736 struct drm_i915_private *dev_priv = to_i915(dev);
2737 unsigned long irqflags;
2738
2739 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2740 i915_enable_pipestat(dev_priv, pipe,
2741 PIPE_START_VBLANK_INTERRUPT_STATUS);
2742 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2743
2744 return 0;
2745 }
2746
2747 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2748 {
2749 struct drm_i915_private *dev_priv = to_i915(dev);
2750 unsigned long irqflags;
2751 uint32_t bit = INTEL_GEN(dev) >= 7 ?
2752 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2753
2754 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2755 ilk_enable_display_irq(dev_priv, bit);
2756 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2757
2758 return 0;
2759 }
2760
2761 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2762 {
2763 struct drm_i915_private *dev_priv = to_i915(dev);
2764 unsigned long irqflags;
2765
2766 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2767 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2768 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2769
2770 return 0;
2771 }
2772
2773 /* Called from drm generic code, passed 'crtc' which
2774 * we use as a pipe index
2775 */
2776 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
2777 {
2778 struct drm_i915_private *dev_priv = to_i915(dev);
2779 unsigned long irqflags;
2780
2781 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2782 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2783 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2784 }
2785
2786 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
2787 {
2788 struct drm_i915_private *dev_priv = to_i915(dev);
2789 unsigned long irqflags;
2790
2791 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2792 i915_disable_pipestat(dev_priv, pipe,
2793 PIPE_START_VBLANK_INTERRUPT_STATUS);
2794 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2795 }
2796
2797 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2798 {
2799 struct drm_i915_private *dev_priv = to_i915(dev);
2800 unsigned long irqflags;
2801 uint32_t bit = INTEL_GEN(dev) >= 7 ?
2802 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2803
2804 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2805 ilk_disable_display_irq(dev_priv, bit);
2806 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2807 }
2808
2809 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2810 {
2811 struct drm_i915_private *dev_priv = to_i915(dev);
2812 unsigned long irqflags;
2813
2814 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2815 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2816 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2817 }
2818
2819 static bool
2820 ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
2821 {
2822 if (INTEL_GEN(engine->i915) >= 8) {
2823 return (ipehr >> 23) == 0x1c;
2824 } else {
2825 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2826 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2827 MI_SEMAPHORE_REGISTER);
2828 }
2829 }
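
/*
 * The gen8 branch above decodes the command in one comparison: after
 * shifting right by 23, the remaining value holds the 3-bit command
 * type (000b for MI commands) plus the 6-bit MI opcode, so '== 0x1c'
 * matches MI_SEMAPHORE_WAIT and nothing else. A hedged general form
 * (hypothetical helper):
 */
static inline bool dword_is_mi_command(u32 dword, u32 mi_opcode)
{
	/* bits 31:29 = command type (0 for MI), bits 28:23 = MI opcode */
	return (dword >> 23) == mi_opcode;
}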
2830
2831 static struct intel_engine_cs *
2832 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
2833 u64 offset)
2834 {
2835 struct drm_i915_private *dev_priv = engine->i915;
2836 struct intel_engine_cs *signaller;
2837 enum intel_engine_id id;
2838
2839 if (INTEL_GEN(dev_priv) >= 8) {
2840 for_each_engine(signaller, dev_priv, id) {
2841 if (engine == signaller)
2842 continue;
2843
2844 if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
2845 return signaller;
2846 }
2847 } else {
2848 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2849
2850 for_each_engine(signaller, dev_priv, id) {
2851 			if (engine == signaller)
2852 continue;
2853
2854 if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
2855 return signaller;
2856 }
2857 }
2858
2859 DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
2860 engine->name, ipehr, offset);
2861
2862 return ERR_PTR(-ENODEV);
2863 }
2864
2865 static struct intel_engine_cs *
2866 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2867 {
2868 struct drm_i915_private *dev_priv = engine->i915;
2869 void __iomem *vaddr;
2870 u32 cmd, ipehr, head;
2871 u64 offset = 0;
2872 int i, backwards;
2873
2874 /*
2875 * This function does not support execlist mode - any attempt to
2876 * proceed further into this function will result in a kernel panic
2877 * when dereferencing ring->buffer, which is not set up in execlist
2878 * mode.
2879 *
2880 * The correct way of doing it would be to derive the currently
2881 * executing ring buffer from the current context, which is derived
2882 * from the currently running request. Unfortunately, to get the
2883 * current request we would have to grab the struct_mutex before doing
2884 * anything else, which would be ill-advised since some other thread
2885 * might have grabbed it already and managed to hang itself, causing
2886 * the hang checker to deadlock.
2887 *
2888 * Therefore, this function does not support execlist mode in its
2889 * current form. Just return NULL and move on.
2890 */
2891 if (engine->buffer == NULL)
2892 return NULL;
2893
2894 ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
2895 if (!ipehr_is_semaphore_wait(engine, ipehr))
2896 return NULL;
2897
2898 /*
2899 * HEAD is likely pointing to the dword after the actual command,
2900 	 * so scan backwards until we find the MBOX. But limit it to just 4
2901 	 * or 5 dwords, depending on the semaphore wait command size.
2902 	 * Note that we don't care about ACTHD here since that might
2903 	 * point at a batch, and semaphores are always emitted into the
2904 * ringbuffer itself.
2905 */
2906 head = I915_READ_HEAD(engine) & HEAD_ADDR;
2907 backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
2908 vaddr = (void __iomem *)engine->buffer->vaddr;
2909
2910 for (i = backwards; i; --i) {
2911 /*
2912 * Be paranoid and presume the hw has gone off into the wild -
2913 * our ring is smaller than what the hardware (and hence
2914 * HEAD_ADDR) allows. Also handles wrap-around.
2915 */
2916 head &= engine->buffer->size - 1;
2917
2918 /* This here seems to blow up */
2919 cmd = ioread32(vaddr + head);
2920 if (cmd == ipehr)
2921 break;
2922
2923 head -= 4;
2924 }
2925
2926 if (!i)
2927 return NULL;
2928
2929 *seqno = ioread32(vaddr + head + 4) + 1;
2930 if (INTEL_GEN(dev_priv) >= 8) {
2931 offset = ioread32(vaddr + head + 12);
2932 offset <<= 32;
2933 offset |= ioread32(vaddr + head + 8);
2934 }
2935 return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
2936 }
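
/*
 * Standalone form (hypothetical, editorial) of the backwards scan
 * above: the ring size is a power of two, so 'head &= size - 1' both
 * clamps a runaway HEAD_ADDR and handles the wrap-around caused by
 * stepping back one dword (4 bytes) at a time, including the u32
 * underflow below zero.
 */
static inline u32 ring_scan_backwards(const u32 *ring, u32 size,
				      u32 head, u32 needle, int limit)
{
	while (limit--) {
		head &= size - 1;		/* clamp + wrap */
		if (ring[head / 4] == needle)
			return head;
		head -= 4;			/* previous dword */
	}

	return ~0u;	/* not found within the window */
}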
2937
2938 static int semaphore_passed(struct intel_engine_cs *engine)
2939 {
2940 struct drm_i915_private *dev_priv = engine->i915;
2941 struct intel_engine_cs *signaller;
2942 u32 seqno;
2943
2944 engine->hangcheck.deadlock++;
2945
2946 signaller = semaphore_waits_for(engine, &seqno);
2947 if (signaller == NULL)
2948 return -1;
2949
2950 if (IS_ERR(signaller))
2951 return 0;
2952
2953 /* Prevent pathological recursion due to driver bugs */
2954 if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
2955 return -1;
2956
2957 if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
2958 return 1;
2959
2960 /* cursory check for an unkickable deadlock */
2961 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2962 semaphore_passed(signaller) < 0)
2963 return -1;
2964
2965 return 0;
2966 }
2967
2968 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2969 {
2970 struct intel_engine_cs *engine;
2971 enum intel_engine_id id;
2972
2973 for_each_engine(engine, dev_priv, id)
2974 engine->hangcheck.deadlock = 0;
2975 }
2976
2977 static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
2978 {
2979 u32 tmp = current_instdone | *old_instdone;
2980 bool unchanged;
2981
2982 unchanged = tmp == *old_instdone;
2983 *old_instdone |= tmp;
2984
2985 return unchanged;
2986 }
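
/*
 * The accumulator above is monotonic: bits are only ever OR-ed into
 * *old_instdone, so a subunit that flickers between done and undone
 * counts as progress exactly once, when each new bit first appears.
 * An equivalent, hedged formulation phrased as "did we see new bits"
 * (hypothetical helper):
 */
static inline bool instdone_saw_new_bits(u32 sample, u32 *accum)
{
	u32 merged = sample | *accum;
	bool progressed = merged != *accum;

	*accum = merged;
	return progressed;
}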
2987
2988 static bool subunits_stuck(struct intel_engine_cs *engine)
2989 {
2990 struct drm_i915_private *dev_priv = engine->i915;
2991 struct intel_instdone instdone;
2992 struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
2993 bool stuck;
2994 int slice;
2995 int subslice;
2996
2997 if (engine->id != RCS)
2998 return true;
2999
3000 intel_engine_get_instdone(engine, &instdone);
3001
3002 /* There might be unstable subunit states even when
3003 	 * the actual head is not moving. Filter out the unstable ones by
3004 	 * accumulating the undone -> done transitions and only
3005 	 * considering those as progress.
3006 */
3007 stuck = instdone_unchanged(instdone.instdone,
3008 &accu_instdone->instdone);
3009 stuck &= instdone_unchanged(instdone.slice_common,
3010 &accu_instdone->slice_common);
3011
3012 for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
3013 stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
3014 &accu_instdone->sampler[slice][subslice]);
3015 stuck &= instdone_unchanged(instdone.row[slice][subslice],
3016 &accu_instdone->row[slice][subslice]);
3017 }
3018
3019 return stuck;
3020 }
3021
3022 static enum intel_engine_hangcheck_action
3023 head_stuck(struct intel_engine_cs *engine, u64 acthd)
3024 {
3025 if (acthd != engine->hangcheck.acthd) {
3026
3027 /* Clear subunit states on head movement */
3028 memset(&engine->hangcheck.instdone, 0,
3029 sizeof(engine->hangcheck.instdone));
3030
3031 return HANGCHECK_ACTIVE;
3032 }
3033
3034 if (!subunits_stuck(engine))
3035 return HANGCHECK_ACTIVE;
3036
3037 return HANGCHECK_HUNG;
3038 }
3039
3040 static enum intel_engine_hangcheck_action
3041 engine_stuck(struct intel_engine_cs *engine, u64 acthd)
3042 {
3043 struct drm_i915_private *dev_priv = engine->i915;
3044 enum intel_engine_hangcheck_action ha;
3045 u32 tmp;
3046
3047 ha = head_stuck(engine, acthd);
3048 if (ha != HANGCHECK_HUNG)
3049 return ha;
3050
3051 if (IS_GEN2(dev_priv))
3052 return HANGCHECK_HUNG;
3053
3054 /* Is the chip hanging on a WAIT_FOR_EVENT?
3055 * If so we can simply poke the RB_WAIT bit
3056 * and break the hang. This should work on
3057 * all but the second generation chipsets.
3058 */
3059 tmp = I915_READ_CTL(engine);
3060 if (tmp & RING_WAIT) {
3061 i915_handle_error(dev_priv, 0,
3062 "Kicking stuck wait on %s",
3063 engine->name);
3064 I915_WRITE_CTL(engine, tmp);
3065 return HANGCHECK_KICK;
3066 }
3067
3068 if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3069 switch (semaphore_passed(engine)) {
3070 default:
3071 return HANGCHECK_HUNG;
3072 case 1:
3073 i915_handle_error(dev_priv, 0,
3074 "Kicking stuck semaphore on %s",
3075 engine->name);
3076 I915_WRITE_CTL(engine, tmp);
3077 return HANGCHECK_KICK;
3078 case 0:
3079 return HANGCHECK_WAIT;
3080 }
3081 }
3082
3083 return HANGCHECK_HUNG;
3084 }
3085
3086 /*
3087 * This is called when the chip hasn't reported back with completed
3088  * batchbuffers in a long time. We keep track of seqno progress per
3089  * ring, and if there is no progress the hangcheck score for that ring
3090  * is increased. Further, acthd is inspected to see if the ring is
3091  * stuck; if it is, we kick the ring. If we see no progress on three
3092  * subsequent calls we assume the chip is wedged and try to reset it.
3093 */
3094 static void i915_hangcheck_elapsed(struct work_struct *work)
3095 {
3096 struct drm_i915_private *dev_priv =
3097 container_of(work, typeof(*dev_priv),
3098 gpu_error.hangcheck_work.work);
3099 struct intel_engine_cs *engine;
3100 enum intel_engine_id id;
3101 unsigned int hung = 0, stuck = 0;
3102 int busy_count = 0;
3103 #define BUSY 1
3104 #define KICK 5
3105 #define HUNG 20
3106 #define ACTIVE_DECAY 15
3107
3108 if (!i915.enable_hangcheck)
3109 return;
3110
3111 if (!READ_ONCE(dev_priv->gt.awake))
3112 return;
3113
3114 /* As enabling the GPU requires fairly extensive mmio access,
3115 * periodically arm the mmio checker to see if we are triggering
3116 * any invalid access.
3117 */
3118 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
3119
3120 for_each_engine(engine, dev_priv, id) {
3121 bool busy = intel_engine_has_waiter(engine);
3122 u64 acthd;
3123 u32 seqno;
3124 u32 submit;
3125
3126 semaphore_clear_deadlocks(dev_priv);
3127
3128 /* We don't strictly need an irq-barrier here, as we are not
3129 		 * serving an interrupt request, but be paranoid in case the
3130 * barrier has side-effects (such as preventing a broken
3131 * cacheline snoop) and so be sure that we can see the seqno
3132 * advance. If the seqno should stick, due to a stale
3133 * cacheline, we would erroneously declare the GPU hung.
3134 */
3135 if (engine->irq_seqno_barrier)
3136 engine->irq_seqno_barrier(engine);
3137
3138 acthd = intel_engine_get_active_head(engine);
3139 seqno = intel_engine_get_seqno(engine);
3140 submit = READ_ONCE(engine->last_submitted_seqno);
3141
3142 if (engine->hangcheck.seqno == seqno) {
3143 if (i915_seqno_passed(seqno, submit)) {
3144 engine->hangcheck.action = HANGCHECK_IDLE;
3145 } else {
3146 /* We always increment the hangcheck score
3147 * if the engine is busy and still processing
3148 * the same request, so that no single request
3149 * can run indefinitely (such as a chain of
3150 * batches). The only time we do not increment
3151 				 * the hangcheck score on this ring is if this
3152 * engine is in a legitimate wait for another
3153 * engine. In that case the waiting engine is a
3154 * victim and we want to be sure we catch the
3155 * right culprit. Then every time we do kick
3156 * the ring, add a small increment to the
3157 * score so that we can catch a batch that is
3158 * being repeatedly kicked and so responsible
3159 * for stalling the machine.
3160 */
3161 engine->hangcheck.action =
3162 engine_stuck(engine, acthd);
3163
3164 switch (engine->hangcheck.action) {
3165 case HANGCHECK_IDLE:
3166 case HANGCHECK_WAIT:
3167 break;
3168 case HANGCHECK_ACTIVE:
3169 engine->hangcheck.score += BUSY;
3170 break;
3171 case HANGCHECK_KICK:
3172 engine->hangcheck.score += KICK;
3173 break;
3174 case HANGCHECK_HUNG:
3175 engine->hangcheck.score += HUNG;
3176 break;
3177 }
3178 }
3179
3180 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3181 hung |= intel_engine_flag(engine);
3182 if (engine->hangcheck.action != HANGCHECK_HUNG)
3183 stuck |= intel_engine_flag(engine);
3184 }
3185 } else {
3186 engine->hangcheck.action = HANGCHECK_ACTIVE;
3187
3188 /* Gradually reduce the count so that we catch DoS
3189 * attempts across multiple batches.
3190 */
3191 if (engine->hangcheck.score > 0)
3192 engine->hangcheck.score -= ACTIVE_DECAY;
3193 if (engine->hangcheck.score < 0)
3194 engine->hangcheck.score = 0;
3195
3196 /* Clear head and subunit states on seqno movement */
3197 acthd = 0;
3198
3199 memset(&engine->hangcheck.instdone, 0,
3200 sizeof(engine->hangcheck.instdone));
3201 }
3202
3203 engine->hangcheck.seqno = seqno;
3204 engine->hangcheck.acthd = acthd;
3205 busy_count += busy;
3206 }
3207
3208 if (hung) {
3209 char msg[80];
3210 unsigned int tmp;
3211 int len;
3212
3213 /* If some rings hung but others were still busy, only
3214 * blame the hanging rings in the synopsis.
3215 */
3216 if (stuck != hung)
3217 hung &= ~stuck;
3218 len = scnprintf(msg, sizeof(msg),
3219 "%s on ", stuck == hung ? "No progress" : "Hang");
3220 for_each_engine_masked(engine, dev_priv, hung, tmp)
3221 len += scnprintf(msg + len, sizeof(msg) - len,
3222 "%s, ", engine->name);
3223 msg[len-2] = '\0';
3224
3225 return i915_handle_error(dev_priv, hung, msg);
3226 }
3227
3228 /* Reset timer in case GPU hangs without another request being added */
3229 if (busy_count)
3230 i915_queue_hangcheck(dev_priv);
3231 }
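
/*
 * Rough model of the score dynamics above, using the constants defined
 * in i915_hangcheck_elapsed() (BUSY/KICK/HUNG/ACTIVE_DECAY): one call
 * per hangcheck tick, progress pays the score down while a stuck engine
 * accumulates until it crosses HANGCHECK_SCORE_RING_HUNG. Hypothetical
 * helper, for illustration only.
 */
static inline int hangcheck_score_tick(int score, bool seqno_advanced,
				       enum intel_engine_hangcheck_action action)
{
	if (seqno_advanced)
		return max(0, score - ACTIVE_DECAY);	/* decay towards 0 */

	switch (action) {
	case HANGCHECK_ACTIVE:
		return score + BUSY;	/* same request, head still moving */
	case HANGCHECK_KICK:
		return score + KICK;	/* we had to kick the ring */
	case HANGCHECK_HUNG:
		return score + HUNG;	/* no movement at all */
	default:
		return score;		/* idle or legitimate wait */
	}
}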
3232
3233 static void ibx_irq_reset(struct drm_device *dev)
3234 {
3235 struct drm_i915_private *dev_priv = to_i915(dev);
3236
3237 if (HAS_PCH_NOP(dev))
3238 return;
3239
3240 GEN5_IRQ_RESET(SDE);
3241
3242 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3243 I915_WRITE(SERR_INT, 0xffffffff);
3244 }
3245
3246 /*
3247 * SDEIER is also touched by the interrupt handler to work around missed PCH
3248 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3249 * instead we unconditionally enable all PCH interrupt sources here, but then
3250 * only unmask them as needed with SDEIMR.
3251 *
3252 * This function needs to be called before interrupts are enabled.
3253 */
3254 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3255 {
3256 struct drm_i915_private *dev_priv = to_i915(dev);
3257
3258 if (HAS_PCH_NOP(dev))
3259 return;
3260
3261 WARN_ON(I915_READ(SDEIER) != 0);
3262 I915_WRITE(SDEIER, 0xffffffff);
3263 POSTING_READ(SDEIER);
3264 }
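
/*
 * The scheme above relies on the usual two-stage gating: a south
 * display event is delivered only if it is both enabled in SDEIER and
 * not masked in SDEIMR, roughly (status & SDEIER & ~SDEIMR). Writing
 * SDEIER to all-ones once, before interrupts are live, lets the
 * runtime code toggle SDEIMR alone. A hedged model of that gating
 * (hypothetical helper):
 */
static inline u32 sde_irqs_delivered(u32 status, u32 ier, u32 imr)
{
	return status & ier & ~imr;
}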
3265
3266 static void gen5_gt_irq_reset(struct drm_device *dev)
3267 {
3268 struct drm_i915_private *dev_priv = to_i915(dev);
3269
3270 GEN5_IRQ_RESET(GT);
3271 if (INTEL_INFO(dev)->gen >= 6)
3272 GEN5_IRQ_RESET(GEN6_PM);
3273 }
3274
3275 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3276 {
3277 enum pipe pipe;
3278
3279 if (IS_CHERRYVIEW(dev_priv))
3280 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3281 else
3282 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3283
3284 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3285 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3286
3287 for_each_pipe(dev_priv, pipe) {
3288 I915_WRITE(PIPESTAT(pipe),
3289 PIPE_FIFO_UNDERRUN_STATUS |
3290 PIPESTAT_INT_STATUS_MASK);
3291 dev_priv->pipestat_irq_mask[pipe] = 0;
3292 }
3293
3294 GEN5_IRQ_RESET(VLV_);
3295 dev_priv->irq_mask = ~0;
3296 }
3297
3298 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3299 {
3300 u32 pipestat_mask;
3301 u32 enable_mask;
3302 enum pipe pipe;
3303
3304 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3305 PIPE_CRC_DONE_INTERRUPT_STATUS;
3306
3307 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3308 for_each_pipe(dev_priv, pipe)
3309 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3310
3311 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3312 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3313 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3314 if (IS_CHERRYVIEW(dev_priv))
3315 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3316
3317 WARN_ON(dev_priv->irq_mask != ~0);
3318
3319 dev_priv->irq_mask = ~enable_mask;
3320
3321 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
3322 }
3323
3324 /* drm_dma.h hooks */
3326 static void ironlake_irq_reset(struct drm_device *dev)
3327 {
3328 struct drm_i915_private *dev_priv = to_i915(dev);
3329
3330 I915_WRITE(HWSTAM, 0xffffffff);
3331
3332 GEN5_IRQ_RESET(DE);
3333 if (IS_GEN7(dev))
3334 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3335
3336 gen5_gt_irq_reset(dev);
3337
3338 ibx_irq_reset(dev);
3339 }
3340
3341 static void valleyview_irq_preinstall(struct drm_device *dev)
3342 {
3343 struct drm_i915_private *dev_priv = to_i915(dev);
3344
3345 I915_WRITE(VLV_MASTER_IER, 0);
3346 POSTING_READ(VLV_MASTER_IER);
3347
3348 gen5_gt_irq_reset(dev);
3349
3350 spin_lock_irq(&dev_priv->irq_lock);
3351 if (dev_priv->display_irqs_enabled)
3352 vlv_display_irq_reset(dev_priv);
3353 spin_unlock_irq(&dev_priv->irq_lock);
3354 }
3355
3356 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3357 {
3358 GEN8_IRQ_RESET_NDX(GT, 0);
3359 GEN8_IRQ_RESET_NDX(GT, 1);
3360 GEN8_IRQ_RESET_NDX(GT, 2);
3361 GEN8_IRQ_RESET_NDX(GT, 3);
3362 }
3363
3364 static void gen8_irq_reset(struct drm_device *dev)
3365 {
3366 struct drm_i915_private *dev_priv = to_i915(dev);
3367 int pipe;
3368
3369 I915_WRITE(GEN8_MASTER_IRQ, 0);
3370 POSTING_READ(GEN8_MASTER_IRQ);
3371
3372 gen8_gt_irq_reset(dev_priv);
3373
3374 for_each_pipe(dev_priv, pipe)
3375 if (intel_display_power_is_enabled(dev_priv,
3376 POWER_DOMAIN_PIPE(pipe)))
3377 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3378
3379 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3380 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3381 GEN5_IRQ_RESET(GEN8_PCU_);
3382
3383 if (HAS_PCH_SPLIT(dev))
3384 ibx_irq_reset(dev);
3385 }
3386
3387 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3388 unsigned int pipe_mask)
3389 {
3390 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3391 enum pipe pipe;
3392
3393 spin_lock_irq(&dev_priv->irq_lock);
3394 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3395 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3396 dev_priv->de_irq_mask[pipe],
3397 ~dev_priv->de_irq_mask[pipe] | extra_ier);
3398 spin_unlock_irq(&dev_priv->irq_lock);
3399 }
3400
3401 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3402 unsigned int pipe_mask)
3403 {
3404 enum pipe pipe;
3405
3406 spin_lock_irq(&dev_priv->irq_lock);
3407 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3408 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3409 spin_unlock_irq(&dev_priv->irq_lock);
3410
3411 /* make sure we're done processing display irqs */
3412 synchronize_irq(dev_priv->drm.irq);
3413 }
3414
3415 static void cherryview_irq_preinstall(struct drm_device *dev)
3416 {
3417 struct drm_i915_private *dev_priv = to_i915(dev);
3418
3419 I915_WRITE(GEN8_MASTER_IRQ, 0);
3420 POSTING_READ(GEN8_MASTER_IRQ);
3421
3422 gen8_gt_irq_reset(dev_priv);
3423
3424 GEN5_IRQ_RESET(GEN8_PCU_);
3425
3426 spin_lock_irq(&dev_priv->irq_lock);
3427 if (dev_priv->display_irqs_enabled)
3428 vlv_display_irq_reset(dev_priv);
3429 spin_unlock_irq(&dev_priv->irq_lock);
3430 }
3431
3432 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3433 const u32 hpd[HPD_NUM_PINS])
3434 {
3435 struct intel_encoder *encoder;
3436 u32 enabled_irqs = 0;
3437
3438 for_each_intel_encoder(&dev_priv->drm, encoder)
3439 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3440 enabled_irqs |= hpd[encoder->hpd_pin];
3441
3442 return enabled_irqs;
3443 }
3444
3445 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3446 {
3447 u32 hotplug_irqs, hotplug, enabled_irqs;
3448
3449 if (HAS_PCH_IBX(dev_priv)) {
3450 hotplug_irqs = SDE_HOTPLUG_MASK;
3451 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3452 } else {
3453 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3454 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3455 }
3456
3457 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3458
3459 /*
3460 * Enable digital hotplug on the PCH, and configure the DP short pulse
3461 * duration to 2ms (which is the minimum in the Display Port spec).
3462 * The pulse duration bits are reserved on LPT+.
3463 */
3464 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3465 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3466 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3467 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3468 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3469 /*
3470 * When CPU and PCH are on the same package, port A
3471 * HPD must be enabled in both north and south.
3472 */
3473 if (HAS_PCH_LPT_LP(dev_priv))
3474 hotplug |= PORTA_HOTPLUG_ENABLE;
3475 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3476 }
3477
3478 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3479 {
3480 u32 hotplug_irqs, hotplug, enabled_irqs;
3481
3482 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3483 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3484
3485 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3486
3487 /* Enable digital hotplug on the PCH */
3488 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3489 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3490 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3491 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3492
3493 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3494 hotplug |= PORTE_HOTPLUG_ENABLE;
3495 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3496 }
3497
3498 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3499 {
3500 u32 hotplug_irqs, hotplug, enabled_irqs;
3501
3502 if (INTEL_GEN(dev_priv) >= 8) {
3503 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3504 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3505
3506 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3507 } else if (INTEL_GEN(dev_priv) >= 7) {
3508 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3509 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3510
3511 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3512 } else {
3513 hotplug_irqs = DE_DP_A_HOTPLUG;
3514 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3515
3516 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3517 }
3518
3519 /*
3520 * Enable digital hotplug on the CPU, and configure the DP short pulse
3521 	 * duration to 2ms (which is the minimum in the Display Port spec).
3522 * The pulse duration bits are reserved on HSW+.
3523 */
3524 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3525 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3526 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3527 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3528
3529 ibx_hpd_irq_setup(dev_priv);
3530 }
3531
3532 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3533 {
3534 u32 hotplug_irqs, hotplug, enabled_irqs;
3535
3536 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3537 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3538
3539 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3540
3541 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3542 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3543 PORTA_HOTPLUG_ENABLE;
3544
3545 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3546 hotplug, enabled_irqs);
3547 hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3548
3549 /*
3550 	 * On BXT the HPD invert bit has to be set according to the AOB design
3551 	 * of the HPD detection logic, so update it from the VBT fields.
3552 */
3553
3554 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3555 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3556 hotplug |= BXT_DDIA_HPD_INVERT;
3557 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3558 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3559 hotplug |= BXT_DDIB_HPD_INVERT;
3560 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3561 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3562 hotplug |= BXT_DDIC_HPD_INVERT;
3563
3564 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3565 }
3566
3567 static void ibx_irq_postinstall(struct drm_device *dev)
3568 {
3569 struct drm_i915_private *dev_priv = to_i915(dev);
3570 u32 mask;
3571
3572 if (HAS_PCH_NOP(dev))
3573 return;
3574
3575 if (HAS_PCH_IBX(dev))
3576 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3577 else
3578 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3579
3580 gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3581 I915_WRITE(SDEIMR, ~mask);
3582 }
3583
3584 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3585 {
3586 struct drm_i915_private *dev_priv = to_i915(dev);
3587 u32 pm_irqs, gt_irqs;
3588
3589 pm_irqs = gt_irqs = 0;
3590
3591 dev_priv->gt_irq_mask = ~0;
3592 if (HAS_L3_DPF(dev)) {
3593 /* L3 parity interrupt is always unmasked. */
3594 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3595 gt_irqs |= GT_PARITY_ERROR(dev);
3596 }
3597
3598 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3599 if (IS_GEN5(dev)) {
3600 gt_irqs |= ILK_BSD_USER_INTERRUPT;
3601 } else {
3602 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3603 }
3604
3605 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3606
3607 if (INTEL_INFO(dev)->gen >= 6) {
3608 /*
3609 * RPS interrupts will get enabled/disabled on demand when RPS
3610 * itself is enabled/disabled.
3611 */
3612 if (HAS_VEBOX(dev))
3613 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3614
3615 dev_priv->pm_irq_mask = 0xffffffff;
3616 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3617 }
3618 }
3619
3620 static int ironlake_irq_postinstall(struct drm_device *dev)
3621 {
3622 struct drm_i915_private *dev_priv = to_i915(dev);
3623 u32 display_mask, extra_mask;
3624
3625 if (INTEL_INFO(dev)->gen >= 7) {
3626 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3627 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3628 DE_PLANEB_FLIP_DONE_IVB |
3629 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3630 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3631 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3632 DE_DP_A_HOTPLUG_IVB);
3633 } else {
3634 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3635 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3636 DE_AUX_CHANNEL_A |
3637 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3638 DE_POISON);
3639 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3640 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3641 DE_DP_A_HOTPLUG);
3642 }
3643
3644 dev_priv->irq_mask = ~display_mask;
3645
3646 I915_WRITE(HWSTAM, 0xeffe);
3647
3648 ibx_irq_pre_postinstall(dev);
3649
3650 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3651
3652 gen5_gt_irq_postinstall(dev);
3653
3654 ibx_irq_postinstall(dev);
3655
3656 if (IS_IRONLAKE_M(dev)) {
3657 /* Enable PCU event interrupts
3658 *
3659 * spinlocking not required here for correctness since interrupt
3660 * setup is guaranteed to run in single-threaded context. But we
3661 * need it to make the assert_spin_locked happy. */
3662 spin_lock_irq(&dev_priv->irq_lock);
3663 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3664 spin_unlock_irq(&dev_priv->irq_lock);
3665 }
3666
3667 return 0;
3668 }
3669
3670 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3671 {
3672 assert_spin_locked(&dev_priv->irq_lock);
3673
3674 if (dev_priv->display_irqs_enabled)
3675 return;
3676
3677 dev_priv->display_irqs_enabled = true;
3678
3679 if (intel_irqs_enabled(dev_priv)) {
3680 vlv_display_irq_reset(dev_priv);
3681 vlv_display_irq_postinstall(dev_priv);
3682 }
3683 }
3684
3685 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3686 {
3687 assert_spin_locked(&dev_priv->irq_lock);
3688
3689 if (!dev_priv->display_irqs_enabled)
3690 return;
3691
3692 dev_priv->display_irqs_enabled = false;
3693
3694 if (intel_irqs_enabled(dev_priv))
3695 vlv_display_irq_reset(dev_priv);
3696 }
3697
3698
3699 static int valleyview_irq_postinstall(struct drm_device *dev)
3700 {
3701 struct drm_i915_private *dev_priv = to_i915(dev);
3702
3703 gen5_gt_irq_postinstall(dev);
3704
3705 spin_lock_irq(&dev_priv->irq_lock);
3706 if (dev_priv->display_irqs_enabled)
3707 vlv_display_irq_postinstall(dev_priv);
3708 spin_unlock_irq(&dev_priv->irq_lock);
3709
3710 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3711 POSTING_READ(VLV_MASTER_IER);
3712
3713 return 0;
3714 }
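/*
 * Ordering note: the per-domain IER/IMR programming above happens
 * while the master gate is still closed; VLV_MASTER_IER is opened
 * last, with a posting read to flush the write, so no half-configured
 * source can raise an interrupt. gen8_irq_postinstall() below follows
 * the same scheme with GEN8_MASTER_IRQ.
 */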
3715
3716 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3717 {
3718 /* These are interrupts we'll toggle with the ring mask register */
3719 uint32_t gt_interrupts[] = {
3720 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3721 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3722 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3723 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3724 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3725 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3726 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3727 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3728 0,
3729 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3730 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3731 };
3732
3733 if (HAS_L3_DPF(dev_priv))
3734 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
3735
3736 dev_priv->pm_irq_mask = 0xffffffff;
3737 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3738 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3739 /*
3740 * RPS interrupts will get enabled/disabled on demand when RPS itself
3741 * is enabled/disabled.
3742 */
3743 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3744 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3745 }
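/*
 * Layout note: gt_interrupts[0..3] map onto the four GEN8 GT
 * interrupt banks -- 0: RCS+BCS, 1: VCS1+VCS2, 2: PM/RPS (left empty
 * here and managed on demand by the RPS code), 3: VECS. Within a bank
 * each engine owns a byte selected by its *_IRQ_SHIFT, e.g. for RCS
 * in bank 0:
 *
 *	bits |= (GT_RENDER_USER_INTERRUPT |
 *		 GT_CONTEXT_SWITCH_INTERRUPT) << GEN8_RCS_IRQ_SHIFT;
 */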
3746
3747 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3748 {
3749 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3750 uint32_t de_pipe_enables;
3751 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3752 u32 de_port_enables;
3753 u32 de_misc_masked = GEN8_DE_MISC_GSE;
3754 enum pipe pipe;
3755
3756 if (INTEL_INFO(dev_priv)->gen >= 9) {
3757 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3758 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3759 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3760 GEN9_AUX_CHANNEL_D;
3761 if (IS_BROXTON(dev_priv))
3762 de_port_masked |= BXT_DE_PORT_GMBUS;
3763 } else {
3764 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3765 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3766 }
3767
3768 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3769 GEN8_PIPE_FIFO_UNDERRUN;
3770
3771 de_port_enables = de_port_masked;
3772 if (IS_BROXTON(dev_priv))
3773 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3774 else if (IS_BROADWELL(dev_priv))
3775 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3776
3777 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3778 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3779 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3780
3781 for_each_pipe(dev_priv, pipe)
3782 if (intel_display_power_is_enabled(dev_priv,
3783 POWER_DOMAIN_PIPE(pipe)))
3784 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3785 dev_priv->de_irq_mask[pipe],
3786 de_pipe_enables);
3787
3788 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3789 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3790 }
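/*
 * Two-level enable scheme: de_pipe_enables goes into the IER, so
 * those events can fire at all, while de_irq_mask[] keeps everything
 * except de_pipe_masked masked in the IMR. Vblank and FIFO underrun
 * are therefore armed here but stay masked until, e.g.,
 * gen8_enable_vblank() clears the corresponding IMR bit on demand.
 */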
3791
3792 static int gen8_irq_postinstall(struct drm_device *dev)
3793 {
3794 struct drm_i915_private *dev_priv = to_i915(dev);
3795
3796 if (HAS_PCH_SPLIT(dev))
3797 ibx_irq_pre_postinstall(dev);
3798
3799 gen8_gt_irq_postinstall(dev_priv);
3800 gen8_de_irq_postinstall(dev_priv);
3801
3802 if (HAS_PCH_SPLIT(dev))
3803 ibx_irq_postinstall(dev);
3804
3805 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3806 POSTING_READ(GEN8_MASTER_IRQ);
3807
3808 return 0;
3809 }
3810
3811 static int cherryview_irq_postinstall(struct drm_device *dev)
3812 {
3813 struct drm_i915_private *dev_priv = to_i915(dev);
3814
3815 gen8_gt_irq_postinstall(dev_priv);
3816
3817 spin_lock_irq(&dev_priv->irq_lock);
3818 if (dev_priv->display_irqs_enabled)
3819 vlv_display_irq_postinstall(dev_priv);
3820 spin_unlock_irq(&dev_priv->irq_lock);
3821
3822 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3823 POSTING_READ(GEN8_MASTER_IRQ);
3824
3825 return 0;
3826 }
3827
3828 static void gen8_irq_uninstall(struct drm_device *dev)
3829 {
3830 struct drm_i915_private *dev_priv = to_i915(dev);
3831
3832 if (!dev_priv)
3833 return;
3834
3835 gen8_irq_reset(dev);
3836 }
3837
3838 static void valleyview_irq_uninstall(struct drm_device *dev)
3839 {
3840 struct drm_i915_private *dev_priv = to_i915(dev);
3841
3842 if (!dev_priv)
3843 return;
3844
3845 I915_WRITE(VLV_MASTER_IER, 0);
3846 POSTING_READ(VLV_MASTER_IER);
3847
3848 gen5_gt_irq_reset(dev);
3849
3850 I915_WRITE(HWSTAM, 0xffffffff);
3851
3852 spin_lock_irq(&dev_priv->irq_lock);
3853 if (dev_priv->display_irqs_enabled)
3854 vlv_display_irq_reset(dev_priv);
3855 spin_unlock_irq(&dev_priv->irq_lock);
3856 }
3857
3858 static void cherryview_irq_uninstall(struct drm_device *dev)
3859 {
3860 struct drm_i915_private *dev_priv = to_i915(dev);
3861
3862 if (!dev_priv)
3863 return;
3864
3865 I915_WRITE(GEN8_MASTER_IRQ, 0);
3866 POSTING_READ(GEN8_MASTER_IRQ);
3867
3868 gen8_gt_irq_reset(dev_priv);
3869
3870 GEN5_IRQ_RESET(GEN8_PCU_);
3871
3872 spin_lock_irq(&dev_priv->irq_lock);
3873 if (dev_priv->display_irqs_enabled)
3874 vlv_display_irq_reset(dev_priv);
3875 spin_unlock_irq(&dev_priv->irq_lock);
3876 }
3877
3878 static void ironlake_irq_uninstall(struct drm_device *dev)
3879 {
3880 struct drm_i915_private *dev_priv = to_i915(dev);
3881
3882 if (!dev_priv)
3883 return;
3884
3885 ironlake_irq_reset(dev);
3886 }
3887
3888 static void i8xx_irq_preinstall(struct drm_device *dev)
3889 {
3890 struct drm_i915_private *dev_priv = to_i915(dev);
3891 int pipe;
3892
3893 for_each_pipe(dev_priv, pipe)
3894 I915_WRITE(PIPESTAT(pipe), 0);
3895 I915_WRITE16(IMR, 0xffff);
3896 I915_WRITE16(IER, 0x0);
3897 POSTING_READ16(IER);
3898 }
3899
3900 static int i8xx_irq_postinstall(struct drm_device *dev)
3901 {
3902 struct drm_i915_private *dev_priv = to_i915(dev);
3903
3904 I915_WRITE16(EMR,
3905 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3906
3907 /* Unmask the interrupts that we always want on. */
3908 dev_priv->irq_mask =
3909 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3910 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3911 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3912 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3913 I915_WRITE16(IMR, dev_priv->irq_mask);
3914
3915 I915_WRITE16(IER,
3916 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3917 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3918 I915_USER_INTERRUPT);
3919 POSTING_READ16(IER);
3920
3921 /* Interrupt setup is already guaranteed to be single-threaded, this is
3922 * just to make the assert_spin_locked check happy. */
3923 spin_lock_irq(&dev_priv->irq_lock);
3924 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3925 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3926 spin_unlock_irq(&dev_priv->irq_lock);
3927
3928 return 0;
3929 }
3930
3931 /*
3932 * Returns true when a page flip has completed.
3933 */
3934 static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
3935 int plane, int pipe, u32 iir)
3936 {
3937 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3938
3939 if (!intel_pipe_handle_vblank(dev_priv, pipe))
3940 return false;
3941
3942 if ((iir & flip_pending) == 0)
3943 goto check_page_flip;
3944
3945 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3946 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3947 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3948 * the flip is completed (no longer pending). Since this doesn't raise
3949 * an interrupt per se, we watch for the change at vblank.
3950 */
3951 if (I915_READ16(ISR) & flip_pending)
3952 goto check_page_flip;
3953
3954 intel_finish_page_flip_cs(dev_priv, pipe);
3955 return true;
3956
3957 check_page_flip:
3958 intel_check_page_flip(dev_priv, pipe);
3959 return false;
3960 }
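/*
 * ISR vs IIR, as used above: ISR reflects the live level of each
 * interrupt source, while IIR latches an edge until it is explicitly
 * cleared. A flip is therefore complete exactly when the pending bit
 * is still latched in IIR but has already dropped from ISR.
 */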
3961
3962 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3963 {
3964 struct drm_device *dev = arg;
3965 struct drm_i915_private *dev_priv = to_i915(dev);
3966 u16 iir, new_iir;
3967 u32 pipe_stats[2];
3968 int pipe;
3969 u16 flip_mask =
3970 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3971 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3972 irqreturn_t ret;
3973
3974 if (!intel_irqs_enabled(dev_priv))
3975 return IRQ_NONE;
3976
3977 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
3978 disable_rpm_wakeref_asserts(dev_priv);
3979
3980 ret = IRQ_NONE;
3981 iir = I915_READ16(IIR);
3982 if (iir == 0)
3983 goto out;
3984
3985 while (iir & ~flip_mask) {
3986 /* Can't rely on pipestat interrupt bit in iir as it might
3987 * have been cleared after the pipestat interrupt was received.
3988 * It doesn't set the bit in iir again, but it still produces
3989 * interrupts (for non-MSI).
3990 */
3991 spin_lock(&dev_priv->irq_lock);
3992 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3993 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3994
3995 for_each_pipe(dev_priv, pipe) {
3996 i915_reg_t reg = PIPESTAT(pipe);
3997 pipe_stats[pipe] = I915_READ(reg);
3998
3999 /*
4000 * Clear the PIPE*STAT regs before the IIR
4001 */
4002 if (pipe_stats[pipe] & 0x8000ffff)
4003 I915_WRITE(reg, pipe_stats[pipe]);
4004 }
4005 spin_unlock(&dev_priv->irq_lock);
4006
4007 I915_WRITE16(IIR, iir & ~flip_mask);
4008 new_iir = I915_READ16(IIR); /* Flush posted writes */
4009
4010 if (iir & I915_USER_INTERRUPT)
4011 notify_ring(dev_priv->engine[RCS]);
4012
4013 for_each_pipe(dev_priv, pipe) {
4014 int plane = pipe;
4015 if (HAS_FBC(dev_priv))
4016 plane = !plane;
4017
4018 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4019 i8xx_handle_vblank(dev_priv, plane, pipe, iir))
4020 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4021
4022 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4023 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4024
4025 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4026 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4027 pipe);
4028 }
4029
4030 iir = new_iir;
4031 }
4032 ret = IRQ_HANDLED;
4033
4034 out:
4035 enable_rpm_wakeref_asserts(dev_priv);
4036
4037 return ret;
4038 }
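/*
 * PIPESTAT layout, which explains the 0x8000ffff tests above: bits
 * 31:16 are enable bits and bits 15:0 the matching status bits, with
 * bit 31 doubling as the FIFO underrun status. Writing back the value
 * just read clears the set status bits (write-1-to-clear) while
 * re-writing the enable half unchanged.
 */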
4039
4040 static void i8xx_irq_uninstall(struct drm_device *dev)
4041 {
4042 struct drm_i915_private *dev_priv = to_i915(dev);
4043 int pipe;
4044
4045 for_each_pipe(dev_priv, pipe) {
4046 /* Clear enable bits; then clear status bits */
4047 I915_WRITE(PIPESTAT(pipe), 0);
4048 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4049 }
4050 I915_WRITE16(IMR, 0xffff);
4051 I915_WRITE16(IER, 0x0);
4052 I915_WRITE16(IIR, I915_READ16(IIR));
4053 }
4054
4055 static void i915_irq_preinstall(struct drm_device *dev)
4056 {
4057 struct drm_i915_private *dev_priv = to_i915(dev);
4058 int pipe;
4059
4060 if (I915_HAS_HOTPLUG(dev)) {
4061 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4062 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4063 }
4064
4065 I915_WRITE16(HWSTAM, 0xeffe);
4066 for_each_pipe(dev_priv, pipe)
4067 I915_WRITE(PIPESTAT(pipe), 0);
4068 I915_WRITE(IMR, 0xffffffff);
4069 I915_WRITE(IER, 0x0);
4070 POSTING_READ(IER);
4071 }
4072
4073 static int i915_irq_postinstall(struct drm_device *dev)
4074 {
4075 struct drm_i915_private *dev_priv = to_i915(dev);
4076 u32 enable_mask;
4077
4078 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4079
4080 /* Unmask the interrupts that we always want on. */
4081 dev_priv->irq_mask =
4082 ~(I915_ASLE_INTERRUPT |
4083 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4084 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4085 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4086 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4087
4088 enable_mask =
4089 I915_ASLE_INTERRUPT |
4090 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4091 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4092 I915_USER_INTERRUPT;
4093
4094 if (I915_HAS_HOTPLUG(dev)) {
4095 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4096 POSTING_READ(PORT_HOTPLUG_EN);
4097
4098 /* Enable in IER... */
4099 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4100 /* and unmask in IMR */
4101 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4102 }
4103
4104 I915_WRITE(IMR, dev_priv->irq_mask);
4105 I915_WRITE(IER, enable_mask);
4106 POSTING_READ(IER);
4107
4108 i915_enable_asle_pipestat(dev_priv);
4109
4110 /* Interrupt setup is already guaranteed to be single-threaded, this is
4111 * just to make the assert_spin_locked check happy. */
4112 spin_lock_irq(&dev_priv->irq_lock);
4113 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4114 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4115 spin_unlock_irq(&dev_priv->irq_lock);
4116
4117 return 0;
4118 }
4119
4120 /*
4121 * Returns true when a page flip has completed.
4122 */
4123 static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
4124 int plane, int pipe, u32 iir)
4125 {
4126 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4127
4128 if (!intel_pipe_handle_vblank(dev_priv, pipe))
4129 return false;
4130
4131 if ((iir & flip_pending) == 0)
4132 goto check_page_flip;
4133
4134 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4135 * to '0' on the following vblank, i.e. IIR has the PendingFlip
4136 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4137 * the flip is completed (no longer pending). Since this doesn't raise
4138 * an interrupt per se, we watch for the change at vblank.
4139 */
4140 if (I915_READ(ISR) & flip_pending)
4141 goto check_page_flip;
4142
4143 intel_finish_page_flip_cs(dev_priv, pipe);
4144 return true;
4145
4146 check_page_flip:
4147 intel_check_page_flip(dev_priv, pipe);
4148 return false;
4149 }
4150
4151 static irqreturn_t i915_irq_handler(int irq, void *arg)
4152 {
4153 struct drm_device *dev = arg;
4154 struct drm_i915_private *dev_priv = to_i915(dev);
4155 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4156 u32 flip_mask =
4157 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4158 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4159 int pipe, ret = IRQ_NONE;
4160
4161 if (!intel_irqs_enabled(dev_priv))
4162 return IRQ_NONE;
4163
4164 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4165 disable_rpm_wakeref_asserts(dev_priv);
4166
4167 iir = I915_READ(IIR);
4168 do {
4169 bool irq_received = (iir & ~flip_mask) != 0;
4170 bool blc_event = false;
4171
4172 /* Can't rely on pipestat interrupt bit in iir as it might
4173 * have been cleared after the pipestat interrupt was received.
4174 * It doesn't set the bit in iir again, but it still produces
4175 * interrupts (for non-MSI).
4176 */
4177 spin_lock(&dev_priv->irq_lock);
4178 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4179 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4180
4181 for_each_pipe(dev_priv, pipe) {
4182 i915_reg_t reg = PIPESTAT(pipe);
4183 pipe_stats[pipe] = I915_READ(reg);
4184
4185 /* Clear the PIPE*STAT regs before the IIR */
4186 if (pipe_stats[pipe] & 0x8000ffff) {
4187 I915_WRITE(reg, pipe_stats[pipe]);
4188 irq_received = true;
4189 }
4190 }
4191 spin_unlock(&dev_priv->irq_lock);
4192
4193 if (!irq_received)
4194 break;
4195
4196 /* Consume port. Then clear IIR or we'll miss events */
4197 if (I915_HAS_HOTPLUG(dev_priv) &&
4198 iir & I915_DISPLAY_PORT_INTERRUPT) {
4199 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4200 if (hotplug_status)
4201 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4202 }
4203
4204 I915_WRITE(IIR, iir & ~flip_mask);
4205 new_iir = I915_READ(IIR); /* Flush posted writes */
4206
4207 if (iir & I915_USER_INTERRUPT)
4208 notify_ring(dev_priv->engine[RCS]);
4209
4210 for_each_pipe(dev_priv, pipe) {
4211 int plane = pipe;
4212 if (HAS_FBC(dev_priv))
4213 plane = !plane;
4214
4215 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4216 i915_handle_vblank(dev_priv, plane, pipe, iir))
4217 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4218
4219 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4220 blc_event = true;
4221
4222 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4223 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4224
4225 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4226 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4227 pipe);
4228 }
4229
4230 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4231 intel_opregion_asle_intr(dev_priv);
4232
4233 /* With MSI, interrupts are only generated when iir
4234 * transitions from zero to nonzero. If another bit got
4235 * set while we were handling the existing iir bits, then
4236 * we would never get another interrupt.
4237 *
4238 * This is fine on non-MSI as well, as if we hit this path
4239 * we avoid exiting the interrupt handler only to generate
4240 * another one.
4241 *
4242 * Note that for MSI this could cause a stray interrupt report
4243 * if an interrupt landed in the time between writing IIR and
4244 * the posting read. This should be rare enough to never
4245 * trigger the 99% of 100,000 interrupts test for disabling
4246 * stray interrupts.
4247 */
4248 ret = IRQ_HANDLED;
4249 iir = new_iir;
4250 } while (iir & ~flip_mask);
4251
4252 enable_rpm_wakeref_asserts(dev_priv);
4253
4254 return ret;
4255 }
4256
4257 static void i915_irq_uninstall(struct drm_device *dev)
4258 {
4259 struct drm_i915_private *dev_priv = to_i915(dev);
4260 int pipe;
4261
4262 if (I915_HAS_HOTPLUG(dev)) {
4263 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4264 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4265 }
4266
4267 I915_WRITE16(HWSTAM, 0xffff);
4268 for_each_pipe(dev_priv, pipe) {
4269 /* Clear enable bits; then clear status bits */
4270 I915_WRITE(PIPESTAT(pipe), 0);
4271 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4272 }
4273 I915_WRITE(IMR, 0xffffffff);
4274 I915_WRITE(IER, 0x0);
4275
4276 I915_WRITE(IIR, I915_READ(IIR));
4277 }
4278
4279 static void i965_irq_preinstall(struct drm_device *dev)
4280 {
4281 struct drm_i915_private *dev_priv = to_i915(dev);
4282 int pipe;
4283
4284 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4285 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4286
4287 I915_WRITE(HWSTAM, 0xeffe);
4288 for_each_pipe(dev_priv, pipe)
4289 I915_WRITE(PIPESTAT(pipe), 0);
4290 I915_WRITE(IMR, 0xffffffff);
4291 I915_WRITE(IER, 0x0);
4292 POSTING_READ(IER);
4293 }
4294
4295 static int i965_irq_postinstall(struct drm_device *dev)
4296 {
4297 struct drm_i915_private *dev_priv = to_i915(dev);
4298 u32 enable_mask;
4299 u32 error_mask;
4300
4301 /* Unmask the interrupts that we always want on. */
4302 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4303 I915_DISPLAY_PORT_INTERRUPT |
4304 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4305 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4306 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4307 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4308 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4309
4310 enable_mask = ~dev_priv->irq_mask;
4311 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4312 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4313 enable_mask |= I915_USER_INTERRUPT;
4314
4315 if (IS_G4X(dev_priv))
4316 enable_mask |= I915_BSD_USER_INTERRUPT;
4317
4318 /* Interrupt setup is already guaranteed to be single-threaded, this is
4319 * just to make the assert_spin_locked check happy. */
4320 spin_lock_irq(&dev_priv->irq_lock);
4321 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4322 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4323 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4324 spin_unlock_irq(&dev_priv->irq_lock);
4325
4326 /*
4327 * Enable some error detection, note the instruction error mask
4328 * bit is reserved, so we leave it masked.
4329 */
4330 if (IS_G4X(dev_priv)) {
4331 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4332 GM45_ERROR_MEM_PRIV |
4333 GM45_ERROR_CP_PRIV |
4334 I915_ERROR_MEMORY_REFRESH);
4335 } else {
4336 error_mask = ~(I915_ERROR_PAGE_TABLE |
4337 I915_ERROR_MEMORY_REFRESH);
4338 }
4339 I915_WRITE(EMR, error_mask);
4340
4341 I915_WRITE(IMR, dev_priv->irq_mask);
4342 I915_WRITE(IER, enable_mask);
4343 POSTING_READ(IER);
4344
4345 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4346 POSTING_READ(PORT_HOTPLUG_EN);
4347
4348 i915_enable_asle_pipestat(dev_priv);
4349
4350 return 0;
4351 }
4352
4353 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4354 {
4355 u32 hotplug_en;
4356
4357 assert_spin_locked(&dev_priv->irq_lock);
4358
4359 /* Note HDMI and DP share hotplug bits */
4360 /* enable bits are the same for all generations */
4361 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4362 /* Programming the CRT detection parameters tends to generate a
4363  * spurious hotplug event about three seconds later.
4364  * So just do it once.
4365  */
4366 if (IS_G4X(dev_priv))
4367 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4368 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4369
4370 /* Ignore TV since it's buggy */
4371 i915_hotplug_interrupt_update_locked(dev_priv,
4372 HOTPLUG_INT_EN_MASK |
4373 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4374 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4375 hotplug_en);
4376 }
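/*
 * i915_hotplug_interrupt_update_locked(dev_priv, mask, bits) above is
 * a read-modify-write of PORT_HOTPLUG_EN under irq_lock, with 'bits'
 * a subset of 'mask'. Roughly:
 *
 *	val = I915_READ(PORT_HOTPLUG_EN);
 *	val &= ~mask;		clear every bit we own
 *	val |= bits;		set the requested subset
 *	I915_WRITE(PORT_HOTPLUG_EN, val);
 */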
4377
4378 static irqreturn_t i965_irq_handler(int irq, void *arg)
4379 {
4380 struct drm_device *dev = arg;
4381 struct drm_i915_private *dev_priv = to_i915(dev);
4382 u32 iir, new_iir;
4383 u32 pipe_stats[I915_MAX_PIPES];
4384 int ret = IRQ_NONE, pipe;
4385 u32 flip_mask =
4386 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4387 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4388
4389 if (!intel_irqs_enabled(dev_priv))
4390 return IRQ_NONE;
4391
4392 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4393 disable_rpm_wakeref_asserts(dev_priv);
4394
4395 iir = I915_READ(IIR);
4396
4397 for (;;) {
4398 bool irq_received = (iir & ~flip_mask) != 0;
4399 bool blc_event = false;
4400
4401 /* Can't rely on pipestat interrupt bit in iir as it might
4402 * have been cleared after the pipestat interrupt was received.
4403 * It doesn't set the bit in iir again, but it still produces
4404 * interrupts (for non-MSI).
4405 */
4406 spin_lock(&dev_priv->irq_lock);
4407 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4408 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4409
4410 for_each_pipe(dev_priv, pipe) {
4411 i915_reg_t reg = PIPESTAT(pipe);
4412 pipe_stats[pipe] = I915_READ(reg);
4413
4414 /*
4415 * Clear the PIPE*STAT regs before the IIR
4416 */
4417 if (pipe_stats[pipe] & 0x8000ffff) {
4418 I915_WRITE(reg, pipe_stats[pipe]);
4419 irq_received = true;
4420 }
4421 }
4422 spin_unlock(&dev_priv->irq_lock);
4423
4424 if (!irq_received)
4425 break;
4426
4427 ret = IRQ_HANDLED;
4428
4429 /* Consume port. Then clear IIR or we'll miss events */
4430 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
4431 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4432 if (hotplug_status)
4433 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4434 }
4435
4436 I915_WRITE(IIR, iir & ~flip_mask);
4437 new_iir = I915_READ(IIR); /* Flush posted writes */
4438
4439 if (iir & I915_USER_INTERRUPT)
4440 notify_ring(dev_priv->engine[RCS]);
4441 if (iir & I915_BSD_USER_INTERRUPT)
4442 notify_ring(dev_priv->engine[VCS]);
4443
4444 for_each_pipe(dev_priv, pipe) {
4445 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4446 i915_handle_vblank(dev_priv, pipe, pipe, iir))
4447 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4448
4449 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4450 blc_event = true;
4451
4452 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4453 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4454
4455 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4456 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4457 }
4458
4459 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4460 intel_opregion_asle_intr(dev_priv);
4461
4462 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4463 gmbus_irq_handler(dev_priv);
4464
4465 /* With MSI, interrupts are only generated when iir
4466 * transitions from zero to nonzero. If another bit got
4467 * set while we were handling the existing iir bits, then
4468 * we would never get another interrupt.
4469 *
4470 * This is fine on non-MSI as well, as if we hit this path
4471 * we avoid exiting the interrupt handler only to generate
4472 * another one.
4473 *
4474 * Note that for MSI this could cause a stray interrupt report
4475 * if an interrupt landed in the time between writing IIR and
4476 * the posting read. This should be rare enough to never
4477 * trigger the 99% of 100,000 interrupts test for disabling
4478 * stray interrupts.
4479 */
4480 iir = new_iir;
4481 }
4482
4483 enable_rpm_wakeref_asserts(dev_priv);
4484
4485 return ret;
4486 }
4487
4488 static void i965_irq_uninstall(struct drm_device *dev)
4489 {
4490 struct drm_i915_private *dev_priv = to_i915(dev);
4491 int pipe;
4492
4493 if (!dev_priv)
4494 return;
4495
4496 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4497 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4498
4499 I915_WRITE(HWSTAM, 0xffffffff);
4500 for_each_pipe(dev_priv, pipe)
4501 I915_WRITE(PIPESTAT(pipe), 0);
4502 I915_WRITE(IMR, 0xffffffff);
4503 I915_WRITE(IER, 0x0);
4504
4505 for_each_pipe(dev_priv, pipe)
4506 I915_WRITE(PIPESTAT(pipe),
4507 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4508 I915_WRITE(IIR, I915_READ(IIR));
4509 }
4510
4511 /**
4512 * intel_irq_init - initializes irq support
4513 * @dev_priv: i915 device instance
4514 *
4515 * This function initializes all the irq support including work items, timers
4516 * and all the vtables. It does not set up the interrupt itself, though.
4517 */
4518 void intel_irq_init(struct drm_i915_private *dev_priv)
4519 {
4520 struct drm_device *dev = &dev_priv->drm;
4521
4522 intel_hpd_init_work(dev_priv);
4523
4524 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4525 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4526
4527 /* Let's track the enabled rps events */
4528 if (IS_VALLEYVIEW(dev_priv))
4529 /* WaGsvRC0ResidencyMethod:vlv */
4530 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4531 else
4532 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4533
4534 dev_priv->rps.pm_intr_keep = 0;
4535
4536 /*
4537 * SNB and IVB are known to hard hang, and VLV/CHV may as well, on a
4538 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
4539 *
4540 * TODO: verify if this can be reproduced on VLV,CHV.
4541 */
4542 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
4543 dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
4544
4545 if (INTEL_INFO(dev_priv)->gen >= 8)
4546 dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
4547
4548 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4549 i915_hangcheck_elapsed);
4550
4551 if (IS_GEN2(dev_priv)) {
4552 /* Gen2 doesn't have a hardware frame counter */
4553 dev->max_vblank_count = 0;
4554 dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
4555 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4556 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4557 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4558 } else {
4559 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4560 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4561 }
4562
4563 /*
4564 * Opt out of the vblank disable timer on everything except gen2.
4565 * Gen2 doesn't have a hardware frame counter and so depends on
4566 * vblank interrupts to produce sane vblank sequence numbers.
4567 */
4568 if (!IS_GEN2(dev_priv))
4569 dev->vblank_disable_immediate = true;
4570
4571 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4572 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4573
4574 if (IS_CHERRYVIEW(dev_priv)) {
4575 dev->driver->irq_handler = cherryview_irq_handler;
4576 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4577 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4578 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4579 dev->driver->enable_vblank = i965_enable_vblank;
4580 dev->driver->disable_vblank = i965_disable_vblank;
4581 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4582 } else if (IS_VALLEYVIEW(dev_priv)) {
4583 dev->driver->irq_handler = valleyview_irq_handler;
4584 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4585 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4586 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4587 dev->driver->enable_vblank = i965_enable_vblank;
4588 dev->driver->disable_vblank = i965_disable_vblank;
4589 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4590 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4591 dev->driver->irq_handler = gen8_irq_handler;
4592 dev->driver->irq_preinstall = gen8_irq_reset;
4593 dev->driver->irq_postinstall = gen8_irq_postinstall;
4594 dev->driver->irq_uninstall = gen8_irq_uninstall;
4595 dev->driver->enable_vblank = gen8_enable_vblank;
4596 dev->driver->disable_vblank = gen8_disable_vblank;
4597 if (IS_BROXTON(dev))
4598 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4599 else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
4600 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4601 else
4602 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4603 } else if (HAS_PCH_SPLIT(dev)) {
4604 dev->driver->irq_handler = ironlake_irq_handler;
4605 dev->driver->irq_preinstall = ironlake_irq_reset;
4606 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4607 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4608 dev->driver->enable_vblank = ironlake_enable_vblank;
4609 dev->driver->disable_vblank = ironlake_disable_vblank;
4610 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4611 } else {
4612 if (IS_GEN2(dev_priv)) {
4613 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4614 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4615 dev->driver->irq_handler = i8xx_irq_handler;
4616 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4617 dev->driver->enable_vblank = i8xx_enable_vblank;
4618 dev->driver->disable_vblank = i8xx_disable_vblank;
4619 } else if (IS_GEN3(dev_priv)) {
4620 dev->driver->irq_preinstall = i915_irq_preinstall;
4621 dev->driver->irq_postinstall = i915_irq_postinstall;
4622 dev->driver->irq_uninstall = i915_irq_uninstall;
4623 dev->driver->irq_handler = i915_irq_handler;
4624 dev->driver->enable_vblank = i8xx_enable_vblank;
4625 dev->driver->disable_vblank = i8xx_disable_vblank;
4626 } else {
4627 dev->driver->irq_preinstall = i965_irq_preinstall;
4628 dev->driver->irq_postinstall = i965_irq_postinstall;
4629 dev->driver->irq_uninstall = i965_irq_uninstall;
4630 dev->driver->irq_handler = i965_irq_handler;
4631 dev->driver->enable_vblank = i965_enable_vblank;
4632 dev->driver->disable_vblank = i965_disable_vblank;
4633 }
4634 if (I915_HAS_HOTPLUG(dev_priv))
4635 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4636 }
4637 }
4638
4639 /**
4640 * intel_irq_install - enables the hardware interrupt
4641 * @dev_priv: i915 device instance
4642 *
4643 * This function enables the hardware interrupt handling, but leaves the hotplug
4644 * handling still disabled. It is called after intel_irq_init().
4645 *
4646 * In the driver load and resume code we need working interrupts in a few places
4647 * but don't want to deal with the hassle of concurrent probe and hotplug
4648 * workers. Hence the split into this two-stage approach.
4649 */
4650 int intel_irq_install(struct drm_i915_private *dev_priv)
4651 {
4652 /*
4653 * We enable some interrupt sources in our postinstall hooks, so mark
4654 * interrupts as enabled _before_ actually enabling them to avoid
4655 * special cases in our ordering checks.
4656 */
4657 dev_priv->pm.irqs_enabled = true;
4658
4659 return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
4660 }
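/*
 * Typical driver-load ordering, as a sketch (the driver load path is
 * authoritative):
 *
 *	intel_irq_init(dev_priv);	pick the vtable, init work items
 *	intel_irq_install(dev_priv);	request the IRQ, run postinstall
 *	...
 *	intel_irq_uninstall(dev_priv);	teardown on unload
 */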
4661
4662 /**
4663 * intel_irq_uninstall - finalizes all irq handling
4664 * @dev_priv: i915 device instance
4665 *
4666 * This stops interrupt and hotplug handling and unregisters and frees all
4667 * resources acquired in the init functions.
4668 */
4669 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4670 {
4671 drm_irq_uninstall(&dev_priv->drm);
4672 intel_hpd_cancel_work(dev_priv);
4673 dev_priv->pm.irqs_enabled = false;
4674 }
4675
4676 /**
4677 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4678 * @dev_priv: i915 device instance
4679 *
4680 * This function is used to disable interrupts at runtime, both in the runtime
4681 * pm and the system suspend/resume code.
4682 */
4683 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4684 {
4685 dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4686 dev_priv->pm.irqs_enabled = false;
4687 synchronize_irq(dev_priv->drm.irq);
4688 }
4689
4690 /**
4691 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4692 * @dev_priv: i915 device instance
4693 *
4694 * This function is used to enable interrupts at runtime, both in the runtime
4695 * pm and the system suspend/resume code.
4696 */
4697 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4698 {
4699 dev_priv->pm.irqs_enabled = true;
4700 dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
4701 dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4702 }
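/*
 * These two helpers are meant to be used in strict pairs around a
 * runtime or system suspend/resume cycle: disable before powering
 * down, enable after powering back up, so that pm.irqs_enabled always
 * mirrors the state the preinstall/postinstall and uninstall hooks
 * (re)establish in hardware.
 */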