/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IMR, (imr_val)); \
	I915_WRITE(type##IER, (ier_val)); \
	POSTING_READ(type##IER); \
} while (0)

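/*
 * Illustrative expansion (not in the original source): GEN5_IRQ_RESET(GT)
 * pastes the type prefix onto the register names, i.e. it masks GTIMR,
 * zeroes GTIER and then clears GTIIR twice, since the IIR can hold two
 * queued events:
 *
 *	I915_WRITE(GTIMR, 0xffffffff);
 *	POSTING_READ(GTIMR);
 *	I915_WRITE(GTIER, 0);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 */
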
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

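/*
 * Note on the IMR convention used throughout this file: a bit that is set
 * in an interrupt mask register (DEIMR, GTIMR, GEN6_PMIMR, ...) masks the
 * corresponding interrupt. Enabling an interrupt therefore clears its bit
 * from the cached mask, and disabling sets it, as in the two helpers above.
 */
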
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

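/*
 * The check above exists because IVB/HSW have a single DE_ERR_INT_IVB
 * enable bit shared by all pipes: the error interrupt may only be
 * unmasked when no pipe has FIFO underrun reporting disabled (see also
 * the kernel-doc of intel_set_cpu_fifo_underrun_reporting() below).
 */
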
/**
 * bdw_update_pm_irq - update GT interrupt 2
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Copied from the snb function, updated with relevant register offsets
 */
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
		POSTING_READ(GEN8_GT_IMR(2));
	}
}

void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, mask);
}

void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, 0);
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

void i9xx_check_fifo_underruns(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	for_each_intel_crtc(dev, crtc) {
		u32 reg = PIPESTAT(crtc->pipe);
		u32 pipestat;

		if (crtc->cpu_fifo_underrun_disabled)
			continue;

		pipestat = I915_READ(reg) & 0xffff0000;
		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
			continue;

		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);

		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe,
					     bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0xffff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable) {
		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);
	} else {
		if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe,
						  bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (old &&
		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
				  pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

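/*
 * Unlike IVB, Broadwell has a per-pipe underrun mask bit in
 * GEN8_DE_PIPE_IMR, which is why broadwell_set_fifo_underrun_reporting()
 * above needs no cross-pipe can-enable check.
 */
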
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (old && I915_READ(SERR_INT) &
		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
				  transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool old;

	assert_spin_locked(&dev_priv->irq_lock);

	old = !intel_crtc->cpu_fifo_underrun_disabled;
	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (HAS_GMCH_DISPLAY(dev))
		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

	return old;
}

bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}

static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool old;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	old = !intel_crtc->pch_fifo_underrun_disabled;
	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return old;
}


static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

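/*
 * PIPESTAT keeps the interrupt enable bits in the high 16 bits and the
 * matching status bits in the low 16, so the default enable mask is
 * simply status_mask << 16; VLV needs the fixups above because a few of
 * its enable bits don't line up with their status bits.
 */
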
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

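/*
 * Typical usage (as in i915_enable_asle_pipestat() below), with
 * dev_priv->irq_lock held:
 *
 *	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
 */
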
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev: drm device
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

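/*
 * Rationale for the "+ (pixel >= vbl_start)" above: the hardware frame
 * counter increments at the start of active video, i.e. after the vblank
 * that drm wants counted has already begun. Bumping the value by one as
 * soon as the pixel counter crosses vblank start turns the raw frame
 * counter into a vblank counter.
 */
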
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. To keep the
		 * reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix
		 * that, always add htotal-hsync_start to the current pixel
		 * position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	unsigned long irqflags;
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret) {
				/* if we get true, fall back to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
		schedule_work(&dev_priv->hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

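/*
 * Note the two-phase structure above: the HPD event bits and storm state
 * are sampled and cleared under the dev_priv->irq_lock spinlock, while
 * the potentially sleeping ->detect() calls run only after that lock has
 * been dropped, under mode_config.mutex alone.
 */
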
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_notify_mmio_flip(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into a common unit of milliseconds */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate the overall C0 residency percentage
	 * only if the elapsed time is non-zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
			 / elapsed_time);
	}

	return residency;
}

/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {

		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
		   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means the C0 residency is less than the down threshold
		 * over a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the
		 * freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}

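/*
 * Cadence of the checks above: the up residency is evaluated on every RPS
 * EI interrupt, while the down residency is only sampled on every
 * VLV_INT_COUNT_FOR_DOWN_EI-th interrupt, implementing the "below the
 * threshold for consecutive intervals" rule from the comment in the
 * function.
 */
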
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	if (INTEL_INFO(dev_priv->dev)->gen >= 8)
		gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	else {
		/* Make sure not to corrupt PMIMR state used by ringbuffer */
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


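/*
 * dev_priv->rps.last_adj makes the ramp above exponential: while
 * consecutive interrupts keep pushing the frequency in the same direction
 * the step doubles, and since it is recomputed as the actual post-clamp
 * delta, a direction change restarts the step at +/-1 (or +/-2 on CHV).
 */
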
1475 | /** | |
1476 | * ivybridge_parity_work - Workqueue called when a parity error interrupt | |
1477 | * occurred. | |
1478 | * @work: workqueue struct | |
1479 | * | |
1480 | * Doesn't actually do anything except notify userspace. As a consequence of | |
1481 | * this event, userspace should try to remap the bad rows since statistically | |
1482 | * it is likely the same row is more likely to go bad again. | |
1483 | */ | |
1484 | static void ivybridge_parity_work(struct work_struct *work) | |
1485 | { | |
1486 | struct drm_i915_private *dev_priv = | |
1487 | container_of(work, struct drm_i915_private, l3_parity.error_work); | |
1488 | u32 error_status, row, bank, subbank; | |
1489 | char *parity_event[6]; | |
1490 | uint32_t misccpctl; | |
1491 | unsigned long flags; | |
1492 | uint8_t slice = 0; | |
1493 | ||
1494 | /* We must turn off DOP level clock gating to access the L3 registers. | |
1495 | * In order to prevent a get/put style interface, acquire struct mutex | |
1496 | * any time we access those registers. | |
1497 | */ | |
1498 | mutex_lock(&dev_priv->dev->struct_mutex); | |
1499 | ||
1500 | /* If we've screwed up tracking, just let the interrupt fire again */ | |
1501 | if (WARN_ON(!dev_priv->l3_parity.which_slice)) | |
1502 | goto out; | |
1503 | ||
1504 | misccpctl = I915_READ(GEN7_MISCCPCTL); | |
1505 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); | |
1506 | POSTING_READ(GEN7_MISCCPCTL); | |
1507 | ||
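| /* which_slice is a bitmask of slices with pending errors; ffs() | |
| * returns a 1-based bit index, hence the slice-- below. */ | |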
1508 | while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { | |
1509 | u32 reg; | |
1510 | ||
1511 | slice--; | |
1512 | if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) | |
1513 | break; | |
1514 | ||
1515 | dev_priv->l3_parity.which_slice &= ~(1<<slice); | |
1516 | ||
1517 | reg = GEN7_L3CDERRST1 + (slice * 0x200); | |
1518 | ||
1519 | error_status = I915_READ(reg); | |
1520 | row = GEN7_PARITY_ERROR_ROW(error_status); | |
1521 | bank = GEN7_PARITY_ERROR_BANK(error_status); | |
1522 | subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); | |
1523 | ||
1524 | I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); | |
1525 | POSTING_READ(reg); | |
1526 | ||
1527 | parity_event[0] = I915_L3_PARITY_UEVENT "=1"; | |
1528 | parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); | |
1529 | parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); | |
1530 | parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); | |
1531 | parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); | |
1532 | parity_event[5] = NULL; | |
1533 | ||
1534 | kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, | |
1535 | KOBJ_CHANGE, parity_event); | |
1536 | ||
1537 | DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", | |
1538 | slice, row, bank, subbank); | |
1539 | ||
1540 | kfree(parity_event[4]); | |
1541 | kfree(parity_event[3]); | |
1542 | kfree(parity_event[2]); | |
1543 | kfree(parity_event[1]); | |
1544 | } | |
1545 | ||
1546 | I915_WRITE(GEN7_MISCCPCTL, misccpctl); | |
1547 | ||
1548 | out: | |
1549 | WARN_ON(dev_priv->l3_parity.which_slice); | |
1550 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | |
1551 | gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); | |
1552 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | |
1553 | ||
1554 | mutex_unlock(&dev_priv->dev->struct_mutex); | |
1555 | } | |
1556 | ||
1557 | static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) | |
1558 | { | |
1559 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1560 | ||
1561 | if (!HAS_L3_DPF(dev)) | |
1562 | return; | |
1563 | ||
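| /* Mask the parity interrupt here; the work item re-enables it | |
| * once the per-slice error status has been read and cleared. */ | |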
1564 | spin_lock(&dev_priv->irq_lock); | |
1565 | gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); | |
1566 | spin_unlock(&dev_priv->irq_lock); | |
1567 | ||
1568 | iir &= GT_PARITY_ERROR(dev); | |
1569 | if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) | |
1570 | dev_priv->l3_parity.which_slice |= 1 << 1; | |
1571 | ||
1572 | if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) | |
1573 | dev_priv->l3_parity.which_slice |= 1 << 0; | |
1574 | ||
1575 | queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); | |
1576 | } | |
1577 | ||
1578 | static void ilk_gt_irq_handler(struct drm_device *dev, | |
1579 | struct drm_i915_private *dev_priv, | |
1580 | u32 gt_iir) | |
1581 | { | |
1582 | if (gt_iir & | |
1583 | (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) | |
1584 | notify_ring(dev, &dev_priv->ring[RCS]); | |
1585 | if (gt_iir & ILK_BSD_USER_INTERRUPT) | |
1586 | notify_ring(dev, &dev_priv->ring[VCS]); | |
1587 | } | |
1588 | ||
1589 | static void snb_gt_irq_handler(struct drm_device *dev, | |
1590 | struct drm_i915_private *dev_priv, | |
1591 | u32 gt_iir) | |
1592 | { | |
1594 | if (gt_iir & | |
1595 | (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) | |
1596 | notify_ring(dev, &dev_priv->ring[RCS]); | |
1597 | if (gt_iir & GT_BSD_USER_INTERRUPT) | |
1598 | notify_ring(dev, &dev_priv->ring[VCS]); | |
1599 | if (gt_iir & GT_BLT_USER_INTERRUPT) | |
1600 | notify_ring(dev, &dev_priv->ring[BCS]); | |
1601 | ||
1602 | if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | | |
1603 | GT_BSD_CS_ERROR_INTERRUPT | | |
1604 | GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { | |
1605 | i915_handle_error(dev, false, "GT error interrupt 0x%08x", | |
1606 | gt_iir); | |
1607 | } | |
1608 | ||
1609 | if (gt_iir & GT_PARITY_ERROR(dev)) | |
1610 | ivybridge_parity_error_irq_handler(dev, gt_iir); | |
1611 | } | |
1612 | ||
1613 | static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) | |
1614 | { | |
1615 | if ((pm_iir & dev_priv->pm_rps_events) == 0) | |
1616 | return; | |
1617 | ||
1618 | spin_lock(&dev_priv->irq_lock); | |
1619 | dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; | |
1620 | gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); | |
1621 | spin_unlock(&dev_priv->irq_lock); | |
1622 | ||
1623 | queue_work(dev_priv->wq, &dev_priv->rps.work); | |
1624 | } | |
1625 | ||
1626 | static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, | |
1627 | struct drm_i915_private *dev_priv, | |
1628 | u32 master_ctl) | |
1629 | { | |
1630 | struct intel_engine_cs *ring; | |
1631 | u32 rcs, bcs, vcs; | |
1632 | uint32_t tmp = 0; | |
1633 | irqreturn_t ret = IRQ_NONE; | |
1634 | ||
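| /* Each GT IIR bank packs two sources at fixed bit shifts: | |
| * bank 0 is RCS/BCS, bank 1 is VCS1/VCS2, bank 2 is PM (RPS), | |
| * bank 3 is VECS. */ | |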
1635 | if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { | |
1636 | tmp = I915_READ(GEN8_GT_IIR(0)); | |
1637 | if (tmp) { | |
1638 | I915_WRITE(GEN8_GT_IIR(0), tmp); | |
1639 | ret = IRQ_HANDLED; | |
1640 | ||
1641 | rcs = tmp >> GEN8_RCS_IRQ_SHIFT; | |
1642 | ring = &dev_priv->ring[RCS]; | |
1643 | if (rcs & GT_RENDER_USER_INTERRUPT) | |
1644 | notify_ring(dev, ring); | |
1645 | if (rcs & GT_CONTEXT_SWITCH_INTERRUPT) | |
1646 | intel_execlists_handle_ctx_events(ring); | |
1647 | ||
1648 | bcs = tmp >> GEN8_BCS_IRQ_SHIFT; | |
1649 | ring = &dev_priv->ring[BCS]; | |
1650 | if (bcs & GT_RENDER_USER_INTERRUPT) | |
1651 | notify_ring(dev, ring); | |
1652 | if (bcs & GT_CONTEXT_SWITCH_INTERRUPT) | |
1653 | intel_execlists_handle_ctx_events(ring); | |
1654 | } else | |
1655 | DRM_ERROR("The master control interrupt lied (GT0)!\n"); | |
1656 | } | |
1657 | ||
1658 | if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { | |
1659 | tmp = I915_READ(GEN8_GT_IIR(1)); | |
1660 | if (tmp) { | |
1661 | I915_WRITE(GEN8_GT_IIR(1), tmp); | |
1662 | ret = IRQ_HANDLED; | |
1663 | ||
1664 | vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; | |
1665 | ring = &dev_priv->ring[VCS]; | |
1666 | if (vcs & GT_RENDER_USER_INTERRUPT) | |
1667 | notify_ring(dev, ring); | |
1668 | if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) | |
1669 | intel_execlists_handle_ctx_events(ring); | |
1670 | ||
1671 | vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; | |
1672 | ring = &dev_priv->ring[VCS2]; | |
1673 | if (vcs & GT_RENDER_USER_INTERRUPT) | |
1674 | notify_ring(dev, ring); | |
1675 | if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) | |
1676 | intel_execlists_handle_ctx_events(ring); | |
1677 | } else | |
1678 | DRM_ERROR("The master control interrupt lied (GT1)!\n"); | |
1679 | } | |
1680 | ||
1681 | if (master_ctl & GEN8_GT_PM_IRQ) { | |
1682 | tmp = I915_READ(GEN8_GT_IIR(2)); | |
1683 | if (tmp & dev_priv->pm_rps_events) { | |
1684 | I915_WRITE(GEN8_GT_IIR(2), | |
1685 | tmp & dev_priv->pm_rps_events); | |
1686 | ret = IRQ_HANDLED; | |
1687 | gen8_rps_irq_handler(dev_priv, tmp); | |
1688 | } else | |
1689 | DRM_ERROR("The master control interrupt lied (PM)!\n"); | |
1690 | } | |
1691 | ||
1692 | if (master_ctl & GEN8_GT_VECS_IRQ) { | |
1693 | tmp = I915_READ(GEN8_GT_IIR(3)); | |
1694 | if (tmp) { | |
1695 | I915_WRITE(GEN8_GT_IIR(3), tmp); | |
1696 | ret = IRQ_HANDLED; | |
1697 | ||
1698 | vcs = tmp >> GEN8_VECS_IRQ_SHIFT; | |
1699 | ring = &dev_priv->ring[VECS]; | |
1700 | if (vcs & GT_RENDER_USER_INTERRUPT) | |
1701 | notify_ring(dev, ring); | |
1702 | if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) | |
1703 | intel_execlists_handle_ctx_events(ring); | |
1704 | } else | |
1705 | DRM_ERROR("The master control interrupt lied (GT3)!\n"); | |
1706 | } | |
1707 | ||
1708 | return ret; | |
1709 | } | |
1710 | ||
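| /* A pin is considered storming once more than HPD_STORM_THRESHOLD | |
| * interrupts arrive within one HPD_STORM_DETECT_PERIOD (in ms). */ | |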
1711 | #define HPD_STORM_DETECT_PERIOD 1000 | |
1712 | #define HPD_STORM_THRESHOLD 5 | |
1713 | ||
1714 | static int ilk_port_to_hotplug_shift(enum port port) | |
1715 | { | |
1716 | switch (port) { | |
1717 | case PORT_A: | |
1718 | case PORT_E: | |
1719 | default: | |
1720 | return -1; | |
1721 | case PORT_B: | |
1722 | return 0; | |
1723 | case PORT_C: | |
1724 | return 8; | |
1725 | case PORT_D: | |
1726 | return 16; | |
1727 | } | |
1728 | } | |
1729 | ||
1730 | static int g4x_port_to_hotplug_shift(enum port port) | |
1731 | { | |
1732 | switch (port) { | |
1733 | case PORT_A: | |
1734 | case PORT_E: | |
1735 | default: | |
1736 | return -1; | |
1737 | case PORT_B: | |
1738 | return 17; | |
1739 | case PORT_C: | |
1740 | return 19; | |
1741 | case PORT_D: | |
1742 | return 21; | |
1743 | } | |
1744 | } | |
1745 | ||
1746 | static inline enum port get_port_from_pin(enum hpd_pin pin) | |
1747 | { | |
1748 | switch (pin) { | |
1749 | case HPD_PORT_B: | |
1750 | return PORT_B; | |
1751 | case HPD_PORT_C: | |
1752 | return PORT_C; | |
1753 | case HPD_PORT_D: | |
1754 | return PORT_D; | |
1755 | default: | |
1756 | return PORT_A; /* no hpd */ | |
1757 | } | |
1758 | } | |
1759 | ||
1760 | static inline void intel_hpd_irq_handler(struct drm_device *dev, | |
1761 | u32 hotplug_trigger, | |
1762 | u32 dig_hotplug_reg, | |
1763 | const u32 *hpd) | |
1764 | { | |
1765 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1766 | int i; | |
1767 | enum port port; | |
1768 | bool storm_detected = false; | |
1769 | bool queue_dig = false, queue_hp = false; | |
1770 | u32 dig_shift; | |
1771 | u32 dig_port_mask = 0; | |
1772 | ||
1773 | if (!hotplug_trigger) | |
1774 | return; | |
1775 | ||
1776 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n", | |
1777 | hotplug_trigger, dig_hotplug_reg); | |
1778 | ||
1779 | spin_lock(&dev_priv->irq_lock); | |
1780 | for (i = 1; i < HPD_NUM_PINS; i++) { | |
1781 | if (!(hpd[i] & hotplug_trigger)) | |
1782 | continue; | |
1783 | ||
1784 | port = get_port_from_pin(i); | |
1785 | if (port && dev_priv->hpd_irq_port[port]) { | |
1786 | bool long_hpd; | |
1787 | ||
1788 | if (IS_G4X(dev)) { | |
1789 | dig_shift = g4x_port_to_hotplug_shift(port); | |
1790 | long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; | |
1791 | } else { | |
1792 | dig_shift = ilk_port_to_hotplug_shift(port); | |
1793 | long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; | |
1794 | } | |
1795 | ||
1796 | DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", | |
1797 | port_name(port), | |
1798 | long_hpd ? "long" : "short"); | |
1799 | /* For long HPD pulses we want the digital port work to run, | |
1800 | * but we still want HPD storm detection to function. */ | |
1801 | if (long_hpd) { | |
1802 | dev_priv->long_hpd_port_mask |= (1 << port); | |
1803 | dig_port_mask |= hpd[i]; | |
1804 | } else { | |
1805 | /* for short HPD just trigger the digital queue */ | |
1806 | dev_priv->short_hpd_port_mask |= (1 << port); | |
1807 | hotplug_trigger &= ~hpd[i]; | |
1808 | } | |
1809 | queue_dig = true; | |
1810 | } | |
1811 | } | |
1812 | ||
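| /* Second pass: per-pin event accounting and storm detection. */ | |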
1813 | for (i = 1; i < HPD_NUM_PINS; i++) { | |
1814 | if (hpd[i] & hotplug_trigger && | |
1815 | dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { | |
1816 | /* | |
1817 | * On GMCH platforms the interrupt mask bits only | |
1818 | * prevent irq generation, not the setting of the | |
1819 | * hotplug bits themselves. So only WARN about unexpected | |
1820 | * interrupts on saner platforms. | |
1821 | */ | |
1822 | WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), | |
1823 | "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", | |
1824 | hotplug_trigger, i, hpd[i]); | |
1825 | ||
1826 | continue; | |
1827 | } | |
1828 | ||
1829 | if (!(hpd[i] & hotplug_trigger) || | |
1830 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) | |
1831 | continue; | |
1832 | ||
1833 | if (!(dig_port_mask & hpd[i])) { | |
1834 | dev_priv->hpd_event_bits |= (1 << i); | |
1835 | queue_hp = true; | |
1836 | } | |
1837 | ||
1838 | if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, | |
1839 | dev_priv->hpd_stats[i].hpd_last_jiffies | |
1840 | + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { | |
1841 | dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; | |
1842 | dev_priv->hpd_stats[i].hpd_cnt = 0; | |
1843 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); | |
1844 | } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { | |
1845 | dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; | |
1846 | dev_priv->hpd_event_bits &= ~(1 << i); | |
1847 | DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); | |
1848 | storm_detected = true; | |
1849 | } else { | |
1850 | dev_priv->hpd_stats[i].hpd_cnt++; | |
1851 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, | |
1852 | dev_priv->hpd_stats[i].hpd_cnt); | |
1853 | } | |
1854 | } | |
1855 | ||
1856 | if (storm_detected) | |
1857 | dev_priv->display.hpd_irq_setup(dev); | |
1858 | spin_unlock(&dev_priv->irq_lock); | |
1859 | ||
1860 | /* | |
1861 | * Our hotplug handler can grab modeset locks (by calling down into the | |
1862 | * fb helpers). Hence it must not be run on our own dev_priv->wq work | |
1863 | * queue for otherwise the flush_work in the pageflip code will | |
1864 | * deadlock. | |
1865 | */ | |
1866 | if (queue_dig) | |
1867 | queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work); | |
1868 | if (queue_hp) | |
1869 | schedule_work(&dev_priv->hotplug_work); | |
1870 | } | |
1871 | ||
1872 | static void gmbus_irq_handler(struct drm_device *dev) | |
1873 | { | |
1874 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1875 | ||
1876 | wake_up_all(&dev_priv->gmbus_wait_queue); | |
1877 | } | |
1878 | ||
1879 | static void dp_aux_irq_handler(struct drm_device *dev) | |
1880 | { | |
1881 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1882 | ||
1883 | wake_up_all(&dev_priv->gmbus_wait_queue); | |
1884 | } | |
1885 | ||
1886 | #if defined(CONFIG_DEBUG_FS) | |
1887 | static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, | |
1888 | uint32_t crc0, uint32_t crc1, | |
1889 | uint32_t crc2, uint32_t crc3, | |
1890 | uint32_t crc4) | |
1891 | { | |
1892 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1893 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; | |
1894 | struct intel_pipe_crc_entry *entry; | |
1895 | int head, tail; | |
1896 | ||
1897 | spin_lock(&pipe_crc->lock); | |
1898 | ||
1899 | if (!pipe_crc->entries) { | |
1900 | spin_unlock(&pipe_crc->lock); | |
1901 | DRM_ERROR("spurious interrupt\n"); | |
1902 | return; | |
1903 | } | |
1904 | ||
1905 | head = pipe_crc->head; | |
1906 | tail = pipe_crc->tail; | |
1907 | ||
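| /* Single-producer ring buffer: the interrupt handler advances | |
| * head, the reader advances tail; drop the sample when full. */ | |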
1908 | if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { | |
1909 | spin_unlock(&pipe_crc->lock); | |
1910 | DRM_ERROR("CRC buffer overflowing\n"); | |
1911 | return; | |
1912 | } | |
1913 | ||
1914 | entry = &pipe_crc->entries[head]; | |
1915 | ||
1916 | entry->frame = dev->driver->get_vblank_counter(dev, pipe); | |
1917 | entry->crc[0] = crc0; | |
1918 | entry->crc[1] = crc1; | |
1919 | entry->crc[2] = crc2; | |
1920 | entry->crc[3] = crc3; | |
1921 | entry->crc[4] = crc4; | |
1922 | ||
1923 | head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); | |
1924 | pipe_crc->head = head; | |
1925 | ||
1926 | spin_unlock(&pipe_crc->lock); | |
1927 | ||
1928 | wake_up_interruptible(&pipe_crc->wq); | |
1929 | } | |
1930 | #else | |
1931 | static inline void | |
1932 | display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, | |
1933 | uint32_t crc0, uint32_t crc1, | |
1934 | uint32_t crc2, uint32_t crc3, | |
1935 | uint32_t crc4) {} | |
1936 | #endif | |
1937 | ||
1938 | ||
1939 | static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) | |
1940 | { | |
1941 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1942 | ||
1943 | display_pipe_crc_irq_handler(dev, pipe, | |
1944 | I915_READ(PIPE_CRC_RES_1_IVB(pipe)), | |
1945 | 0, 0, 0, 0); | |
1946 | } | |
1947 | ||
1948 | static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) | |
1949 | { | |
1950 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1951 | ||
1952 | display_pipe_crc_irq_handler(dev, pipe, | |
1953 | I915_READ(PIPE_CRC_RES_1_IVB(pipe)), | |
1954 | I915_READ(PIPE_CRC_RES_2_IVB(pipe)), | |
1955 | I915_READ(PIPE_CRC_RES_3_IVB(pipe)), | |
1956 | I915_READ(PIPE_CRC_RES_4_IVB(pipe)), | |
1957 | I915_READ(PIPE_CRC_RES_5_IVB(pipe))); | |
1958 | } | |
1959 | ||
1960 | static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) | |
1961 | { | |
1962 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1963 | uint32_t res1, res2; | |
1964 | ||
1965 | if (INTEL_INFO(dev)->gen >= 3) | |
1966 | res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); | |
1967 | else | |
1968 | res1 = 0; | |
1969 | ||
1970 | if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) | |
1971 | res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); | |
1972 | else | |
1973 | res2 = 0; | |
1974 | ||
1975 | display_pipe_crc_irq_handler(dev, pipe, | |
1976 | I915_READ(PIPE_CRC_RES_RED(pipe)), | |
1977 | I915_READ(PIPE_CRC_RES_GREEN(pipe)), | |
1978 | I915_READ(PIPE_CRC_RES_BLUE(pipe)), | |
1979 | res1, res2); | |
1980 | } | |
1981 | ||
1982 | void gen8_flip_interrupt(struct drm_device *dev) | |
1983 | { | |
1984 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1985 | ||
1986 | if (!dev_priv->rps.is_bdw_sw_turbo) | |
1987 | return; | |
1988 | ||
1989 | if (atomic_read(&dev_priv->rps.sw_turbo.flip_received)) { | |
1990 | mod_timer(&dev_priv->rps.sw_turbo.flip_timer, | |
1991 | usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies); | |
1992 | } else { | |
1994 | dev_priv->rps.sw_turbo.flip_timer.expires = | |
1995 | usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies; | |
1996 | add_timer(&dev_priv->rps.sw_turbo.flip_timer); | |
1997 | atomic_set(&dev_priv->rps.sw_turbo.flip_received, true); | |
1998 | } | |
1999 | ||
2000 | bdw_software_turbo(dev); | |
2001 | } | |
2002 | ||
2003 | /* The RPS events need forcewake, so we add them to a work queue and mask their | |
2004 | * IMR bits until the work is done. Other interrupts can be processed without | |
2005 | * the work queue. */ | |
2006 | static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) | |
2007 | { | |
2008 | if (pm_iir & dev_priv->pm_rps_events) { | |
2009 | spin_lock(&dev_priv->irq_lock); | |
2010 | dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; | |
2011 | gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); | |
2012 | spin_unlock(&dev_priv->irq_lock); | |
2013 | ||
2014 | queue_work(dev_priv->wq, &dev_priv->rps.work); | |
2015 | } | |
2016 | ||
2017 | if (HAS_VEBOX(dev_priv->dev)) { | |
2018 | if (pm_iir & PM_VEBOX_USER_INTERRUPT) | |
2019 | notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); | |
2020 | ||
2021 | if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { | |
2022 | i915_handle_error(dev_priv->dev, false, | |
2023 | "VEBOX CS error interrupt 0x%08x", | |
2024 | pm_iir); | |
2025 | } | |
2026 | } | |
2027 | } | |
2028 | ||
2029 | static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) | |
2030 | { | |
2031 | return drm_handle_vblank(dev, pipe); | |
2035 | } | |
2036 | ||
2037 | static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) | |
2038 | { | |
2039 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2040 | u32 pipe_stats[I915_MAX_PIPES] = { }; | |
2041 | int pipe; | |
2042 | ||
2043 | spin_lock(&dev_priv->irq_lock); | |
2044 | for_each_pipe(dev_priv, pipe) { | |
2045 | int reg; | |
2046 | u32 mask, iir_bit = 0; | |
2047 | ||
2048 | /* | |
2049 | * PIPESTAT bits get signalled even when the interrupt is | |
2050 | * disabled with the mask bits, and some of the status bits do | |
2051 | * not generate interrupts at all (like the underrun bit). Hence | |
2052 | * we need to be careful that we only handle what we want to | |
2053 | * handle. | |
2054 | */ | |
2055 | mask = 0; | |
2056 | if (__cpu_fifo_underrun_reporting_enabled(dev, pipe)) | |
2057 | mask |= PIPE_FIFO_UNDERRUN_STATUS; | |
2058 | ||
2059 | switch (pipe) { | |
2060 | case PIPE_A: | |
2061 | iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; | |
2062 | break; | |
2063 | case PIPE_B: | |
2064 | iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; | |
2065 | break; | |
2066 | case PIPE_C: | |
2067 | iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; | |
2068 | break; | |
2069 | } | |
2070 | if (iir & iir_bit) | |
2071 | mask |= dev_priv->pipestat_irq_mask[pipe]; | |
2072 | ||
2073 | if (!mask) | |
2074 | continue; | |
2075 | ||
2076 | reg = PIPESTAT(pipe); | |
2077 | mask |= PIPESTAT_INT_ENABLE_MASK; | |
2078 | pipe_stats[pipe] = I915_READ(reg) & mask; | |
2079 | ||
2080 | /* | |
2081 | * Clear the PIPE*STAT regs before the IIR | |
2082 | */ | |
2083 | if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | | |
2084 | PIPESTAT_INT_STATUS_MASK)) | |
2085 | I915_WRITE(reg, pipe_stats[pipe]); | |
2086 | } | |
2087 | spin_unlock(&dev_priv->irq_lock); | |
2088 | ||
2089 | for_each_pipe(dev_priv, pipe) { | |
2090 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && | |
2091 | intel_pipe_handle_vblank(dev, pipe)) | |
2092 | intel_check_page_flip(dev, pipe); | |
2093 | ||
2094 | if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { | |
2095 | intel_prepare_page_flip(dev, pipe); | |
2096 | intel_finish_page_flip(dev, pipe); | |
2097 | } | |
2098 | ||
2099 | if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) | |
2100 | i9xx_pipe_crc_irq_handler(dev, pipe); | |
2101 | ||
2102 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && | |
2103 | intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) | |
2104 | DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); | |
2105 | } | |
2106 | ||
2107 | if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) | |
2108 | gmbus_irq_handler(dev); | |
2109 | } | |
2110 | ||
2111 | static void i9xx_hpd_irq_handler(struct drm_device *dev) | |
2112 | { | |
2113 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2114 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | |
2115 | ||
2116 | if (hotplug_status) { | |
2117 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | |
2118 | /* | |
2119 | * Make sure hotplug status is cleared before we clear IIR, or else we | |
2120 | * may miss hotplug events. | |
2121 | */ | |
2122 | POSTING_READ(PORT_HOTPLUG_STAT); | |
2123 | ||
2124 | if (IS_G4X(dev)) { | |
2125 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; | |
2126 | ||
2127 | intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x); | |
2128 | } else { | |
2129 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; | |
2130 | ||
2131 | intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915); | |
2132 | } | |
2133 | ||
2134 | if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && | |
2135 | hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) | |
2136 | dp_aux_irq_handler(dev); | |
2137 | } | |
2138 | } | |
2139 | ||
2140 | static irqreturn_t valleyview_irq_handler(int irq, void *arg) | |
2141 | { | |
2142 | struct drm_device *dev = arg; | |
2143 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2144 | u32 iir, gt_iir, pm_iir; | |
2145 | irqreturn_t ret = IRQ_NONE; | |
2146 | ||
2147 | while (true) { | |
2148 | /* Find, clear, then process each source of interrupt */ | |
2149 | ||
2150 | gt_iir = I915_READ(GTIIR); | |
2151 | if (gt_iir) | |
2152 | I915_WRITE(GTIIR, gt_iir); | |
2153 | ||
2154 | pm_iir = I915_READ(GEN6_PMIIR); | |
2155 | if (pm_iir) | |
2156 | I915_WRITE(GEN6_PMIIR, pm_iir); | |
2157 | ||
2158 | iir = I915_READ(VLV_IIR); | |
2159 | if (iir) { | |
2160 | /* Consume port before clearing IIR or we'll miss events */ | |
2161 | if (iir & I915_DISPLAY_PORT_INTERRUPT) | |
2162 | i9xx_hpd_irq_handler(dev); | |
2163 | I915_WRITE(VLV_IIR, iir); | |
2164 | } | |
2165 | ||
2166 | if (gt_iir == 0 && pm_iir == 0 && iir == 0) | |
2167 | goto out; | |
2168 | ||
2169 | ret = IRQ_HANDLED; | |
2170 | ||
2171 | if (gt_iir) | |
2172 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | |
2173 | if (pm_iir) | |
2174 | gen6_rps_irq_handler(dev_priv, pm_iir); | |
2175 | /* Call regardless, as some status bits might not be | |
2176 | * signalled in iir */ | |
2177 | valleyview_pipestat_irq_handler(dev, iir); | |
2178 | } | |
2179 | ||
2180 | out: | |
2181 | return ret; | |
2182 | } | |
2183 | ||
2184 | static irqreturn_t cherryview_irq_handler(int irq, void *arg) | |
2185 | { | |
2186 | struct drm_device *dev = arg; | |
2187 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2188 | u32 master_ctl, iir; | |
2189 | irqreturn_t ret = IRQ_NONE; | |
2190 | ||
2191 | for (;;) { | |
2192 | master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; | |
2193 | iir = I915_READ(VLV_IIR); | |
2194 | ||
2195 | if (master_ctl == 0 && iir == 0) | |
2196 | break; | |
2197 | ||
2198 | ret = IRQ_HANDLED; | |
2199 | ||
2200 | I915_WRITE(GEN8_MASTER_IRQ, 0); | |
2201 | ||
2202 | /* Find, clear, then process each source of interrupt */ | |
2203 | ||
2204 | if (iir) { | |
2205 | /* Consume port before clearing IIR or we'll miss events */ | |
2206 | if (iir & I915_DISPLAY_PORT_INTERRUPT) | |
2207 | i9xx_hpd_irq_handler(dev); | |
2208 | I915_WRITE(VLV_IIR, iir); | |
2209 | } | |
2210 | ||
2211 | gen8_gt_irq_handler(dev, dev_priv, master_ctl); | |
2212 | ||
2213 | /* Call regardless, as some status bits might not be | |
2214 | * signalled in iir */ | |
2215 | valleyview_pipestat_irq_handler(dev, iir); | |
2216 | ||
2217 | I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); | |
2218 | POSTING_READ(GEN8_MASTER_IRQ); | |
2219 | } | |
2220 | ||
2221 | return ret; | |
2222 | } | |
2223 | ||
2224 | static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) | |
2225 | { | |
2226 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2227 | int pipe; | |
2228 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; | |
2229 | u32 dig_hotplug_reg; | |
2230 | ||
2231 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); | |
2232 | I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); | |
2233 | ||
2234 | intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx); | |
2235 | ||
2236 | if (pch_iir & SDE_AUDIO_POWER_MASK) { | |
2237 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> | |
2238 | SDE_AUDIO_POWER_SHIFT); | |
2239 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", | |
2240 | port_name(port)); | |
2241 | } | |
2242 | ||
2243 | if (pch_iir & SDE_AUX_MASK) | |
2244 | dp_aux_irq_handler(dev); | |
2245 | ||
2246 | if (pch_iir & SDE_GMBUS) | |
2247 | gmbus_irq_handler(dev); | |
2248 | ||
2249 | if (pch_iir & SDE_AUDIO_HDCP_MASK) | |
2250 | DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); | |
2251 | ||
2252 | if (pch_iir & SDE_AUDIO_TRANS_MASK) | |
2253 | DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); | |
2254 | ||
2255 | if (pch_iir & SDE_POISON) | |
2256 | DRM_ERROR("PCH poison interrupt\n"); | |
2257 | ||
2258 | if (pch_iir & SDE_FDI_MASK) | |
2259 | for_each_pipe(dev_priv, pipe) | |
2260 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", | |
2261 | pipe_name(pipe), | |
2262 | I915_READ(FDI_RX_IIR(pipe))); | |
2263 | ||
2264 | if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) | |
2265 | DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); | |
2266 | ||
2267 | if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) | |
2268 | DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); | |
2269 | ||
2270 | if (pch_iir & SDE_TRANSA_FIFO_UNDER) | |
2271 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, | |
2272 | false)) | |
2273 | DRM_ERROR("PCH transcoder A FIFO underrun\n"); | |
2274 | ||
2275 | if (pch_iir & SDE_TRANSB_FIFO_UNDER) | |
2276 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, | |
2277 | false)) | |
2278 | DRM_ERROR("PCH transcoder B FIFO underrun\n"); | |
2279 | } | |
2280 | ||
2281 | static void ivb_err_int_handler(struct drm_device *dev) | |
2282 | { | |
2283 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2284 | u32 err_int = I915_READ(GEN7_ERR_INT); | |
2285 | enum pipe pipe; | |
2286 | ||
2287 | if (err_int & ERR_INT_POISON) | |
2288 | DRM_ERROR("Poison interrupt\n"); | |
2289 | ||
2290 | for_each_pipe(dev_priv, pipe) { | |
2291 | if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { | |
2292 | if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, | |
2293 | false)) | |
2294 | DRM_ERROR("Pipe %c FIFO underrun\n", | |
2295 | pipe_name(pipe)); | |
2296 | } | |
2297 | ||
2298 | if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { | |
2299 | if (IS_IVYBRIDGE(dev)) | |
2300 | ivb_pipe_crc_irq_handler(dev, pipe); | |
2301 | else | |
2302 | hsw_pipe_crc_irq_handler(dev, pipe); | |
2303 | } | |
2304 | } | |
2305 | ||
2306 | I915_WRITE(GEN7_ERR_INT, err_int); | |
2307 | } | |
2308 | ||
2309 | static void cpt_serr_int_handler(struct drm_device *dev) | |
2310 | { | |
2311 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2312 | u32 serr_int = I915_READ(SERR_INT); | |
2313 | ||
2314 | if (serr_int & SERR_INT_POISON) | |
2315 | DRM_ERROR("PCH poison interrupt\n"); | |
2316 | ||
2317 | if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) | |
2318 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, | |
2319 | false)) | |
2320 | DRM_ERROR("PCH transcoder A FIFO underrun\n"); | |
2321 | ||
2322 | if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) | |
2323 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, | |
2324 | false)) | |
2325 | DRM_ERROR("PCH transcoder B FIFO underrun\n"); | |
2326 | ||
2327 | if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) | |
2328 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, | |
2329 | false)) | |
2330 | DRM_ERROR("PCH transcoder C FIFO underrun\n"); | |
2331 | ||
2332 | I915_WRITE(SERR_INT, serr_int); | |
2333 | } | |
2334 | ||
2335 | static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) | |
2336 | { | |
2337 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2338 | int pipe; | |
2339 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; | |
2340 | u32 dig_hotplug_reg; | |
2341 | ||
2342 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); | |
2343 | I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); | |
2344 | ||
2345 | intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt); | |
2346 | ||
2347 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { | |
2348 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> | |
2349 | SDE_AUDIO_POWER_SHIFT_CPT); | |
2350 | DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", | |
2351 | port_name(port)); | |
2352 | } | |
2353 | ||
2354 | if (pch_iir & SDE_AUX_MASK_CPT) | |
2355 | dp_aux_irq_handler(dev); | |
2356 | ||
2357 | if (pch_iir & SDE_GMBUS_CPT) | |
2358 | gmbus_irq_handler(dev); | |
2359 | ||
2360 | if (pch_iir & SDE_AUDIO_CP_REQ_CPT) | |
2361 | DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); | |
2362 | ||
2363 | if (pch_iir & SDE_AUDIO_CP_CHG_CPT) | |
2364 | DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); | |
2365 | ||
2366 | if (pch_iir & SDE_FDI_MASK_CPT) | |
2367 | for_each_pipe(dev_priv, pipe) | |
2368 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", | |
2369 | pipe_name(pipe), | |
2370 | I915_READ(FDI_RX_IIR(pipe))); | |
2371 | ||
2372 | if (pch_iir & SDE_ERROR_CPT) | |
2373 | cpt_serr_int_handler(dev); | |
2374 | } | |
2375 | ||
2376 | static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) | |
2377 | { | |
2378 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2379 | enum pipe pipe; | |
2380 | ||
2381 | if (de_iir & DE_AUX_CHANNEL_A) | |
2382 | dp_aux_irq_handler(dev); | |
2383 | ||
2384 | if (de_iir & DE_GSE) | |
2385 | intel_opregion_asle_intr(dev); | |
2386 | ||
2387 | if (de_iir & DE_POISON) | |
2388 | DRM_ERROR("Poison interrupt\n"); | |
2389 | ||
2390 | for_each_pipe(dev_priv, pipe) { | |
2391 | if (de_iir & DE_PIPE_VBLANK(pipe) && | |
2392 | intel_pipe_handle_vblank(dev, pipe)) | |
2393 | intel_check_page_flip(dev, pipe); | |
2394 | ||
2395 | if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) | |
2396 | if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) | |
2397 | DRM_ERROR("Pipe %c FIFO underrun\n", | |
2398 | pipe_name(pipe)); | |
2399 | ||
2400 | if (de_iir & DE_PIPE_CRC_DONE(pipe)) | |
2401 | i9xx_pipe_crc_irq_handler(dev, pipe); | |
2402 | ||
2403 | /* plane/pipes map 1:1 on ilk+ */ | |
2404 | if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { | |
2405 | intel_prepare_page_flip(dev, pipe); | |
2406 | intel_finish_page_flip_plane(dev, pipe); | |
2407 | } | |
2408 | } | |
2409 | ||
2410 | /* check event from PCH */ | |
2411 | if (de_iir & DE_PCH_EVENT) { | |
2412 | u32 pch_iir = I915_READ(SDEIIR); | |
2413 | ||
2414 | if (HAS_PCH_CPT(dev)) | |
2415 | cpt_irq_handler(dev, pch_iir); | |
2416 | else | |
2417 | ibx_irq_handler(dev, pch_iir); | |
2418 | ||
2419 | /* clear the PCH hotplug event before clearing the CPU irq */ | |
2420 | I915_WRITE(SDEIIR, pch_iir); | |
2421 | } | |
2422 | ||
2423 | if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) | |
2424 | ironlake_rps_change_irq_handler(dev); | |
2425 | } | |
2426 | ||
2427 | static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) | |
2428 | { | |
2429 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2430 | enum pipe pipe; | |
2431 | ||
2432 | if (de_iir & DE_ERR_INT_IVB) | |
2433 | ivb_err_int_handler(dev); | |
2434 | ||
2435 | if (de_iir & DE_AUX_CHANNEL_A_IVB) | |
2436 | dp_aux_irq_handler(dev); | |
2437 | ||
2438 | if (de_iir & DE_GSE_IVB) | |
2439 | intel_opregion_asle_intr(dev); | |
2440 | ||
2441 | for_each_pipe(dev_priv, pipe) { | |
2442 | if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && | |
2443 | intel_pipe_handle_vblank(dev, pipe)) | |
2444 | intel_check_page_flip(dev, pipe); | |
2445 | ||
2446 | /* plane/pipes map 1:1 on ilk+ */ | |
2447 | if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { | |
2448 | intel_prepare_page_flip(dev, pipe); | |
2449 | intel_finish_page_flip_plane(dev, pipe); | |
2450 | } | |
2451 | } | |
2452 | ||
2453 | /* check event from PCH */ | |
2454 | if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { | |
2455 | u32 pch_iir = I915_READ(SDEIIR); | |
2456 | ||
2457 | cpt_irq_handler(dev, pch_iir); | |
2458 | ||
2459 | /* clear the PCH hotplug event before clearing the CPU irq */ | |
2460 | I915_WRITE(SDEIIR, pch_iir); | |
2461 | } | |
2462 | } | |
2463 | ||
2464 | /* | |
2465 | * To handle irqs with the minimum potential races with fresh interrupts, we: | |
2466 | * 1 - Disable Master Interrupt Control. | |
2467 | * 2 - Find the source(s) of the interrupt. | |
2468 | * 3 - Clear the Interrupt Identity bits (IIR). | |
2469 | * 4 - Process the interrupt(s) that had bits set in the IIRs. | |
2470 | * 5 - Re-enable Master Interrupt Control. | |
2471 | */ | |
2472 | static irqreturn_t ironlake_irq_handler(int irq, void *arg) | |
2473 | { | |
2474 | struct drm_device *dev = arg; | |
2475 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2476 | u32 de_iir, gt_iir, de_ier, sde_ier = 0; | |
2477 | irqreturn_t ret = IRQ_NONE; | |
2478 | ||
2479 | /* We get interrupts on unclaimed registers, so check for this before we | |
2480 | * do any I915_{READ,WRITE}. */ | |
2481 | intel_uncore_check_errors(dev); | |
2482 | ||
2483 | /* disable master interrupt before clearing iir */ | |
2484 | de_ier = I915_READ(DEIER); | |
2485 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | |
2486 | POSTING_READ(DEIER); | |
2487 | ||
2488 | /* Disable south interrupts. We'll only write to SDEIIR once, so further | |
2489 | * interrupts will be stored on its back queue, and then we'll be | |
2490 | * able to process them after we restore SDEIER (as soon as we restore | |
2491 | * it, we'll get an interrupt if SDEIIR still has something to process | |
2492 | * due to its back queue). */ | |
2493 | if (!HAS_PCH_NOP(dev)) { | |
2494 | sde_ier = I915_READ(SDEIER); | |
2495 | I915_WRITE(SDEIER, 0); | |
2496 | POSTING_READ(SDEIER); | |
2497 | } | |
2498 | ||
2499 | /* Find, clear, then process each source of interrupt */ | |
2500 | ||
2501 | gt_iir = I915_READ(GTIIR); | |
2502 | if (gt_iir) { | |
2503 | I915_WRITE(GTIIR, gt_iir); | |
2504 | ret = IRQ_HANDLED; | |
2505 | if (INTEL_INFO(dev)->gen >= 6) | |
2506 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | |
2507 | else | |
2508 | ilk_gt_irq_handler(dev, dev_priv, gt_iir); | |
2509 | } | |
2510 | ||
2511 | de_iir = I915_READ(DEIIR); | |
2512 | if (de_iir) { | |
2513 | I915_WRITE(DEIIR, de_iir); | |
2514 | ret = IRQ_HANDLED; | |
2515 | if (INTEL_INFO(dev)->gen >= 7) | |
2516 | ivb_display_irq_handler(dev, de_iir); | |
2517 | else | |
2518 | ilk_display_irq_handler(dev, de_iir); | |
2519 | } | |
2520 | ||
2521 | if (INTEL_INFO(dev)->gen >= 6) { | |
2522 | u32 pm_iir = I915_READ(GEN6_PMIIR); | |
2523 | if (pm_iir) { | |
2524 | I915_WRITE(GEN6_PMIIR, pm_iir); | |
2525 | ret = IRQ_HANDLED; | |
2526 | gen6_rps_irq_handler(dev_priv, pm_iir); | |
2527 | } | |
2528 | } | |
2529 | ||
2530 | I915_WRITE(DEIER, de_ier); | |
2531 | POSTING_READ(DEIER); | |
2532 | if (!HAS_PCH_NOP(dev)) { | |
2533 | I915_WRITE(SDEIER, sde_ier); | |
2534 | POSTING_READ(SDEIER); | |
2535 | } | |
2536 | ||
2537 | return ret; | |
2538 | } | |
2539 | ||
2540 | static irqreturn_t gen8_irq_handler(int irq, void *arg) | |
2541 | { | |
2542 | struct drm_device *dev = arg; | |
2543 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2544 | u32 master_ctl; | |
2545 | irqreturn_t ret = IRQ_NONE; | |
2546 | uint32_t tmp = 0; | |
2547 | enum pipe pipe; | |
2548 | ||
2549 | master_ctl = I915_READ(GEN8_MASTER_IRQ); | |
2550 | master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; | |
2551 | if (!master_ctl) | |
2552 | return IRQ_NONE; | |
2553 | ||
2554 | I915_WRITE(GEN8_MASTER_IRQ, 0); | |
2555 | POSTING_READ(GEN8_MASTER_IRQ); | |
2556 | ||
2557 | /* Find, clear, then process each source of interrupt */ | |
2558 | ||
2559 | ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); | |
2560 | ||
2561 | if (master_ctl & GEN8_DE_MISC_IRQ) { | |
2562 | tmp = I915_READ(GEN8_DE_MISC_IIR); | |
2563 | if (tmp) { | |
2564 | I915_WRITE(GEN8_DE_MISC_IIR, tmp); | |
2565 | ret = IRQ_HANDLED; | |
2566 | if (tmp & GEN8_DE_MISC_GSE) | |
2567 | intel_opregion_asle_intr(dev); | |
2568 | else | |
2569 | DRM_ERROR("Unexpected DE Misc interrupt\n"); | |
2570 | } else | |
2572 | DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); | |
2573 | } | |
2574 | ||
2575 | if (master_ctl & GEN8_DE_PORT_IRQ) { | |
2576 | tmp = I915_READ(GEN8_DE_PORT_IIR); | |
2577 | if (tmp) { | |
2578 | I915_WRITE(GEN8_DE_PORT_IIR, tmp); | |
2579 | ret = IRQ_HANDLED; | |
2580 | if (tmp & GEN8_AUX_CHANNEL_A) | |
2581 | dp_aux_irq_handler(dev); | |
2582 | else | |
2583 | DRM_ERROR("Unexpected DE Port interrupt\n"); | |
2584 | } else | |
2586 | DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); | |
2587 | } | |
2588 | ||
2589 | for_each_pipe(dev_priv, pipe) { | |
2590 | uint32_t pipe_iir; | |
2591 | ||
2592 | if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) | |
2593 | continue; | |
2594 | ||
2595 | pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); | |
2596 | if (pipe_iir) { | |
2597 | ret = IRQ_HANDLED; | |
2598 | I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); | |
2599 | if (pipe_iir & GEN8_PIPE_VBLANK && | |
2600 | intel_pipe_handle_vblank(dev, pipe)) | |
2601 | intel_check_page_flip(dev, pipe); | |
2602 | ||
2603 | if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { | |
2604 | intel_prepare_page_flip(dev, pipe); | |
2605 | intel_finish_page_flip_plane(dev, pipe); | |
2606 | } | |
2607 | ||
2608 | if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) | |
2609 | hsw_pipe_crc_irq_handler(dev, pipe); | |
2610 | ||
2611 | if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { | |
2612 | if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, | |
2613 | false)) | |
2614 | DRM_ERROR("Pipe %c FIFO underrun\n", | |
2615 | pipe_name(pipe)); | |
2616 | } | |
2617 | ||
2618 | if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { | |
2619 | DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", | |
2620 | pipe_name(pipe), | |
2621 | pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); | |
2622 | } | |
2623 | } else | |
2624 | DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); | |
2625 | } | |
2626 | ||
2627 | if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { | |
2628 | /* | |
2629 | * FIXME(BDW): Assume for now that the new interrupt handling | |
2630 | * scheme also closed the SDE interrupt handling race we've seen | |
2631 | * on older pch-split platforms. But this needs testing. | |
2632 | */ | |
2633 | u32 pch_iir = I915_READ(SDEIIR); | |
2634 | if (pch_iir) { | |
2635 | I915_WRITE(SDEIIR, pch_iir); | |
2636 | ret = IRQ_HANDLED; | |
2637 | cpt_irq_handler(dev, pch_iir); | |
2638 | } else | |
2639 | DRM_ERROR("The master control interrupt lied (SDE)!\n"); | |
2640 | ||
2641 | } | |
2642 | ||
2643 | I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); | |
2644 | POSTING_READ(GEN8_MASTER_IRQ); | |
2645 | ||
2646 | return ret; | |
2647 | } | |
2648 | ||
2649 | static void i915_error_wake_up(struct drm_i915_private *dev_priv, | |
2650 | bool reset_completed) | |
2651 | { | |
2652 | struct intel_engine_cs *ring; | |
2653 | int i; | |
2654 | ||
2655 | /* | |
2656 | * Notify all waiters for GPU completion events that reset state has | |
2657 | * been changed, and that they need to restart their wait after | |
2658 | * checking for potential errors (and bail out to drop locks if there is | |
2659 | * a gpu reset pending so that i915_error_work_func can acquire them). | |
2660 | */ | |
2661 | ||
2662 | /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ | |
2663 | for_each_ring(ring, dev_priv, i) | |
2664 | wake_up_all(&ring->irq_queue); | |
2665 | ||
2666 | /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ | |
2667 | wake_up_all(&dev_priv->pending_flip_queue); | |
2668 | ||
2669 | /* | |
2670 | * Signal tasks blocked in i915_gem_wait_for_error that the pending | |
2671 | * reset state is cleared. | |
2672 | */ | |
2673 | if (reset_completed) | |
2674 | wake_up_all(&dev_priv->gpu_error.reset_queue); | |
2675 | } | |
2676 | ||
2677 | /** | |
2678 | * i915_error_work_func - do process context error handling work | |
2679 | * @work: work struct | |
2680 | * | |
2681 | * Fire an error uevent so userspace can see that a hang or error | |
2682 | * was detected. | |
2683 | */ | |
2684 | static void i915_error_work_func(struct work_struct *work) | |
2685 | { | |
2686 | struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, | |
2687 | work); | |
2688 | struct drm_i915_private *dev_priv = | |
2689 | container_of(error, struct drm_i915_private, gpu_error); | |
2690 | struct drm_device *dev = dev_priv->dev; | |
2691 | char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; | |
2692 | char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; | |
2693 | char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; | |
2694 | int ret; | |
2695 | ||
2696 | kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); | |
2697 | ||
2698 | /* | |
2699 | * Note that there's only one work item which does gpu resets, so we | |
2700 | * need not worry about concurrent gpu resets potentially incrementing | |
2701 | * error->reset_counter twice. We only need to take care of another | |
2702 | * racing irq/hangcheck declaring the gpu dead for a second time. A | |
2703 | * quick check for that is good enough: schedule_work ensures the | |
2704 | * correct ordering between hang detection and this work item, and since | |
2705 | * the reset in-progress bit is only ever set by code outside of this | |
2706 | * work we don't need to worry about any other races. | |
2707 | */ | |
2708 | if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { | |
2709 | DRM_DEBUG_DRIVER("resetting chip\n"); | |
2710 | kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, | |
2711 | reset_event); | |
2712 | ||
2713 | /* | |
2714 | * In most cases it's guaranteed that we get here with an RPM | |
2715 | * reference held, for example because there is a pending GPU | |
2716 | * request that won't finish until the reset is done. This | |
2717 | * isn't the case at least when we get here by doing a | |
2718 | * simulated reset via debugfs, so get an RPM reference. | |
2719 | */ | |
2720 | intel_runtime_pm_get(dev_priv); | |
2721 | /* | |
2722 | * All state reset _must_ be completed before we update the | |
2723 | * reset counter, for otherwise waiters might miss the reset | |
2724 | * pending state and not properly drop locks, resulting in | |
2725 | * deadlocks with the reset work. | |
2726 | */ | |
2727 | ret = i915_reset(dev); | |
2728 | ||
2729 | intel_display_handle_reset(dev); | |
2730 | ||
2731 | intel_runtime_pm_put(dev_priv); | |
2732 | ||
2733 | if (ret == 0) { | |
2734 | /* | |
2735 | * After all the gem state is reset, increment the reset | |
2736 | * counter and wake up everyone waiting for the reset to | |
2737 | * complete. | |
2738 | * | |
2739 | * Since unlock operations are a one-sided barrier only, | |
2740 | * we need to insert a barrier here to order any seqno | |
2741 | * updates before the counter increment. | |
2743 | */ | |
2744 | smp_mb__before_atomic(); | |
2745 | atomic_inc(&dev_priv->gpu_error.reset_counter); | |
2746 | ||
2747 | kobject_uevent_env(&dev->primary->kdev->kobj, | |
2748 | KOBJ_CHANGE, reset_done_event); | |
2749 | } else { | |
2750 | atomic_set_mask(I915_WEDGED, &error->reset_counter); | |
2751 | } | |
2752 | ||
2753 | /* | |
2754 | * Note: The wake_up also serves as a memory barrier so that | |
2755 | * waiters see the updated value of the reset counter atomic_t. | |
2756 | */ | |
2757 | i915_error_wake_up(dev_priv, true); | |
2758 | } | |
2759 | } | |
2760 | ||
2761 | static void i915_report_and_clear_eir(struct drm_device *dev) | |
2762 | { | |
2763 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2764 | uint32_t instdone[I915_NUM_INSTDONE_REG]; | |
2765 | u32 eir = I915_READ(EIR); | |
2766 | int pipe, i; | |
2767 | ||
2768 | if (!eir) | |
2769 | return; | |
2770 | ||
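| /* EIR reports sticky error sources; dump the detailed per-error | |
| * registers, then write the bits back to EIR to ack them. */ | |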
2771 | pr_err("render error detected, EIR: 0x%08x\n", eir); | |
2772 | ||
2773 | i915_get_extra_instdone(dev, instdone); | |
2774 | ||
2775 | if (IS_G4X(dev)) { | |
2776 | if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { | |
2777 | u32 ipeir = I915_READ(IPEIR_I965); | |
2778 | ||
2779 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); | |
2780 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | |
2781 | for (i = 0; i < ARRAY_SIZE(instdone); i++) | |
2782 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | |
2783 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); | |
2784 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); | |
2785 | I915_WRITE(IPEIR_I965, ipeir); | |
2786 | POSTING_READ(IPEIR_I965); | |
2787 | } | |
2788 | if (eir & GM45_ERROR_PAGE_TABLE) { | |
2789 | u32 pgtbl_err = I915_READ(PGTBL_ER); | |
2790 | pr_err("page table error\n"); | |
2791 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); | |
2792 | I915_WRITE(PGTBL_ER, pgtbl_err); | |
2793 | POSTING_READ(PGTBL_ER); | |
2794 | } | |
2795 | } | |
2796 | ||
2797 | if (!IS_GEN2(dev)) { | |
2798 | if (eir & I915_ERROR_PAGE_TABLE) { | |
2799 | u32 pgtbl_err = I915_READ(PGTBL_ER); | |
2800 | pr_err("page table error\n"); | |
2801 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); | |
2802 | I915_WRITE(PGTBL_ER, pgtbl_err); | |
2803 | POSTING_READ(PGTBL_ER); | |
2804 | } | |
2805 | } | |
2806 | ||
2807 | if (eir & I915_ERROR_MEMORY_REFRESH) { | |
2808 | pr_err("memory refresh error:\n"); | |
2809 | for_each_pipe(dev_priv, pipe) | |
2810 | pr_err("pipe %c stat: 0x%08x\n", | |
2811 | pipe_name(pipe), I915_READ(PIPESTAT(pipe))); | |
2812 | /* pipestat has already been acked */ | |
2813 | } | |
2814 | if (eir & I915_ERROR_INSTRUCTION) { | |
2815 | pr_err("instruction error\n"); | |
2816 | pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); | |
2817 | for (i = 0; i < ARRAY_SIZE(instdone); i++) | |
2818 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | |
2819 | if (INTEL_INFO(dev)->gen < 4) { | |
2820 | u32 ipeir = I915_READ(IPEIR); | |
2821 | ||
2822 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); | |
2823 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); | |
2824 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); | |
2825 | I915_WRITE(IPEIR, ipeir); | |
2826 | POSTING_READ(IPEIR); | |
2827 | } else { | |
2828 | u32 ipeir = I915_READ(IPEIR_I965); | |
2829 | ||
2830 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); | |
2831 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | |
2832 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); | |
2833 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); | |
2834 | I915_WRITE(IPEIR_I965, ipeir); | |
2835 | POSTING_READ(IPEIR_I965); | |
2836 | } | |
2837 | } | |
2838 | ||
2839 | I915_WRITE(EIR, eir); | |
2840 | POSTING_READ(EIR); | |
2841 | eir = I915_READ(EIR); | |
2842 | if (eir) { | |
2843 | /* | |
2844 | * some errors might have become stuck, | |
2845 | * mask them. | |
2846 | */ | |
2847 | DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); | |
2848 | I915_WRITE(EMR, I915_READ(EMR) | eir); | |
2849 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
2850 | } | |
2851 | } | |
2852 | ||
2853 | /** | |
2854 | * i915_handle_error - handle an error interrupt | |
2855 | * @dev: drm device | |
| * @wedged: whether the hang requires a full GPU reset | |
| * @fmt: printf-style format string describing the error | |
2856 | * | |
2857 | * Do some basic checking of register state at error interrupt time and | |
2858 | * dump it to the syslog. Also call i915_capture_error_state() to make | |
2859 | * sure we get a record and make it available in debugfs. Fire a uevent | |
2860 | * so userspace knows something bad happened (should trigger collection | |
2861 | * of a ring dump etc.). | |
2862 | */ | |
2863 | void i915_handle_error(struct drm_device *dev, bool wedged, | |
2864 | const char *fmt, ...) | |
2865 | { | |
2866 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2867 | va_list args; | |
2868 | char error_msg[80]; | |
2869 | ||
2870 | va_start(args, fmt); | |
2871 | vscnprintf(error_msg, sizeof(error_msg), fmt, args); | |
2872 | va_end(args); | |
2873 | ||
2874 | i915_capture_error_state(dev, wedged, error_msg); | |
2875 | i915_report_and_clear_eir(dev); | |
2876 | ||
2877 | if (wedged) { | |
2878 | atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, | |
2879 | &dev_priv->gpu_error.reset_counter); | |
2880 | ||
2881 | /* | |
2882 | * Wakeup waiting processes so that the reset work function | |
2883 | * i915_error_work_func doesn't deadlock trying to grab various | |
2884 | * locks. By bumping the reset counter first, the woken | |
2885 | * processes will see a reset in progress and back off, | |
2886 | * releasing their locks and then waiting for the reset to complete. | |
2887 | * We must do this for _all_ gpu waiters that might hold locks | |
2888 | * that the reset work needs to acquire. | |
2889 | * | |
2890 | * Note: The wake_up serves as the required memory barrier to | |
2891 | * ensure that the waiters see the updated value of the reset | |
2892 | * counter atomic_t. | |
2893 | */ | |
2894 | i915_error_wake_up(dev_priv, false); | |
2895 | } | |
2896 | ||
2897 | /* | |
2898 | * Our reset work can grab modeset locks (since it needs to reset the | |
2899 | * state of outstanding pageflips). Hence it must not be run on our own | |
2900 | * dev_priv->wq work queue for otherwise the flush_work in the pageflip | |
2901 | * code will deadlock. | |
2902 | */ | |
2903 | schedule_work(&dev_priv->gpu_error.work); | |
2904 | } | |
2905 | ||
2906 | /* Called from drm generic code, passed 'crtc' which | |
2907 | * we use as a pipe index | |
2908 | */ | |
2909 | static int i915_enable_vblank(struct drm_device *dev, int pipe) | |
2910 | { | |
2911 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2912 | unsigned long irqflags; | |
2913 | ||
2914 | if (!i915_pipe_enabled(dev, pipe)) | |
2915 | return -EINVAL; | |
2916 | ||
2917 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
2918 | if (INTEL_INFO(dev)->gen >= 4) | |
2919 | i915_enable_pipestat(dev_priv, pipe, | |
2920 | PIPE_START_VBLANK_INTERRUPT_STATUS); | |
2921 | else | |
2922 | i915_enable_pipestat(dev_priv, pipe, | |
2923 | PIPE_VBLANK_INTERRUPT_STATUS); | |
2924 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
2925 | ||
2926 | return 0; | |
2927 | } | |
2928 | ||
2929 | static int ironlake_enable_vblank(struct drm_device *dev, int pipe) | |
2930 | { | |
2931 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2932 | unsigned long irqflags; | |
2933 | uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : | |
2934 | DE_PIPE_VBLANK(pipe); | |
2935 | ||
2936 | if (!i915_pipe_enabled(dev, pipe)) | |
2937 | return -EINVAL; | |
2938 | ||
2939 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
2940 | ironlake_enable_display_irq(dev_priv, bit); | |
2941 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
2942 | ||
2943 | return 0; | |
2944 | } | |
2945 | ||
2946 | static int valleyview_enable_vblank(struct drm_device *dev, int pipe) | |
2947 | { | |
2948 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2949 | unsigned long irqflags; | |
2950 | ||
2951 | if (!i915_pipe_enabled(dev, pipe)) | |
2952 | return -EINVAL; | |
2953 | ||
2954 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
2955 | i915_enable_pipestat(dev_priv, pipe, | |
2956 | PIPE_START_VBLANK_INTERRUPT_STATUS); | |
2957 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
2958 | ||
2959 | return 0; | |
2960 | } | |
2961 | ||
2962 | static int gen8_enable_vblank(struct drm_device *dev, int pipe) | |
2963 | { | |
2964 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2965 | unsigned long irqflags; | |
2966 | ||
2967 | if (!i915_pipe_enabled(dev, pipe)) | |
2968 | return -EINVAL; | |
2969 | ||
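| /* Gen8 vblank is controlled via the per-pipe DE IMR; keep the | |
| * cached de_irq_mask in sync with what we write to the register. */ | |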
2970 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
2971 | dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; | |
2972 | I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); | |
2973 | POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); | |
2974 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
2975 | return 0; | |
2976 | } | |
2977 | ||
2978 | /* Called from drm generic code, passed 'crtc' which | |
2979 | * we use as a pipe index | |
2980 | */ | |
2981 | static void i915_disable_vblank(struct drm_device *dev, int pipe) | |
2982 | { | |
2983 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2984 | unsigned long irqflags; | |
2985 | ||
2986 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
2987 | i915_disable_pipestat(dev_priv, pipe, | |
2988 | PIPE_VBLANK_INTERRUPT_STATUS | | |
2989 | PIPE_START_VBLANK_INTERRUPT_STATUS); | |
2990 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
2991 | } | |
2992 | ||
2993 | static void ironlake_disable_vblank(struct drm_device *dev, int pipe) | |
2994 | { | |
2995 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2996 | unsigned long irqflags; | |
2997 | uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : | |
2998 | DE_PIPE_VBLANK(pipe); | |
2999 | ||
3000 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
3001 | ironlake_disable_display_irq(dev_priv, bit); | |
3002 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
3003 | } | |
3004 | ||
3005 | static void valleyview_disable_vblank(struct drm_device *dev, int pipe) | |
3006 | { | |
3007 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3008 | unsigned long irqflags; | |
3009 | ||
3010 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
3011 | i915_disable_pipestat(dev_priv, pipe, | |
3012 | PIPE_START_VBLANK_INTERRUPT_STATUS); | |
3013 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
3014 | } | |
3015 | ||
3016 | static void gen8_disable_vblank(struct drm_device *dev, int pipe) | |
3017 | { | |
3018 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3019 | unsigned long irqflags; | |
3020 | ||
3021 | if (!i915_pipe_enabled(dev, pipe)) | |
3022 | return; | |
3023 | ||
3024 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
3025 | dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; | |
3026 | I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); | |
3027 | POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); | |
3028 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
3029 | } | |
3030 | ||
3031 | static u32 | |
3032 | ring_last_seqno(struct intel_engine_cs *ring) | |
3033 | { | |
3034 | return list_entry(ring->request_list.prev, | |
3035 | struct drm_i915_gem_request, list)->seqno; | |
3036 | } | |
3037 | ||
3038 | static bool | |
3039 | ring_idle(struct intel_engine_cs *ring, u32 seqno) | |
3040 | { | |
3041 | return (list_empty(&ring->request_list) || | |
3042 | i915_seqno_passed(seqno, ring_last_seqno(ring))); | |
3043 | } | |
3044 | ||
static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
	if (INTEL_INFO(dev)->gen >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}

static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	int i;

	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  ring->id, ipehr, offset);

	return NULL;
}
3089 | ||
3090 | static struct intel_engine_cs * | |
3091 | semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) | |
3092 | { | |
3093 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | |
3094 | u32 cmd, ipehr, head; | |
3095 | u64 offset = 0; | |
3096 | int i, backwards; | |
3097 | ||
3098 | ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); | |
3099 | if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) | |
3100 | return NULL; | |
3101 | ||
3102 | /* | |
3103 | * HEAD is likely pointing to the dword after the actual command, | |
3104 | * so scan backwards until we find the MBOX. But limit it to just 3 | |
3105 | * or 4 dwords depending on the semaphore wait command size. | |
3106 | * Note that we don't care about ACTHD here since that might | |
3107 | * point at at batch, and semaphores are always emitted into the | |
3108 | * ringbuffer itself. | |
3109 | */ | |
3110 | head = I915_READ_HEAD(ring) & HEAD_ADDR; | |
3111 | backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4; | |
3112 | ||
3113 | for (i = backwards; i; --i) { | |
3114 | /* | |
3115 | * Be paranoid and presume the hw has gone off into the wild - | |
3116 | * our ring is smaller than what the hardware (and hence | |
3117 | * HEAD_ADDR) allows. Also handles wrap-around. | |
3118 | */ | |
3119 | head &= ring->buffer->size - 1; | |
3120 | ||
3121 | /* This here seems to blow up */ | |
3122 | cmd = ioread32(ring->buffer->virtual_start + head); | |
3123 | if (cmd == ipehr) | |
3124 | break; | |
3125 | ||
3126 | head -= 4; | |
3127 | } | |
3128 | ||
3129 | if (!i) | |
3130 | return NULL; | |
3131 | ||
3132 | *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; | |
3133 | if (INTEL_INFO(ring->dev)->gen >= 8) { | |
3134 | offset = ioread32(ring->buffer->virtual_start + head + 12); | |
3135 | offset <<= 32; | |
3136 | offset = ioread32(ring->buffer->virtual_start + head + 8); | |
3137 | } | |
3138 | return semaphore_wait_to_signaller_ring(ring, ipehr, offset); | |
3139 | } | |
3140 | ||
static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		return HANGCHECK_ACTIVE_LOOP;
	}

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}
3227 | ||
3228 | /** | |
3229 | * This is called when the chip hasn't reported back with completed | |
3230 | * batchbuffers in a long time. We keep track per ring seqno progress and | |
3231 | * if there are no progress, hangcheck score for that ring is increased. | |
3232 | * Further, acthd is inspected to see if the ring is stuck. On stuck case | |
3233 | * we kick the ring. If we see no progress on three subsequent calls | |
3234 | * we assume chip is wedged and try to fix it by resetting the chip. | |
3235 | */ | |
static void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20

	if (!i915.enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u64 acthd;
		u32 seqno;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is when
				 * the ring is in a legitimate wait for
				 * another ring. In that case the waiting ring
				 * is a victim and we want to be sure we catch
				 * the right culprit. Then every time we do
				 * kick the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
				case HANGCHECK_ACTIVE:
					break;
				case HANGCHECK_ACTIVE_LOOP:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;

			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true, "Ring hung");

	if (busy_count)
		/* Reset timer in case the chip hangs without another
		 * request being added */
		i915_queue_hangcheck(dev);
}

void i915_queue_hangcheck(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!i915.enable_hangcheck)
		return;

	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}

static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void gen5_gt_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}

/* drm_dma.h hooks */
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	gen5_gt_irq_reset(dev);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_enabled(dev_priv,
						POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	ibx_irq_reset(dev);
}

void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
			  ~dev_priv->de_irq_mask[PIPE_B]);
	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
			  ~dev_priv->de_irq_mask[PIPE_C]);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	POSTING_READ(GEN8_PCU_IIR);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	I915_WRITE(VLV_IIR, 0xffffffff);
	POSTING_READ(VLV_IIR);
}

static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		pm_irqs |= dev_priv->pm_rps_events;

		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	unsigned long irqflags;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			     DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return 0;
}

static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
	I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
					       PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	dev_priv->irq_mask &= ~iir_mask;

	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	POSTING_READ(VLV_IER);
}

static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;

	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
						PIPE_GMBUS_INTERRUPT_STATUS);
	i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;
	I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
	I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (dev_priv->dev->irq_enabled)
		valleyview_display_irqs_install(dev_priv);
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (dev_priv->dev->irq_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	dev_priv->irq_mask = ~0;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	POSTING_READ(VLV_IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
		GEN8_PIPE_CDCLK_CRC_DONE |
		GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
		GEN8_PIPE_FIFO_UNDERRUN;
	int pipe;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_enabled(dev_priv,
						POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
		PIPE_CRC_DONE_INTERRUPT_STATUS;
	unsigned long irqflags;
	int pipe;

	/*
	 * Leave vblank interrupts masked initially. enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = ~enable_mask;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	dev_priv->irq_mask = 0;

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

#define GEN8_IRQ_FINI_NDX(type, which) \
do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
} while (0)

#define GEN8_IRQ_FINI(type) \
do { \
	I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
	I915_WRITE(GEN8_##type##_IER, 0); \
	I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR); \
	I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
} while (0)

	GEN8_IRQ_FINI_NDX(GT, 0);
	GEN8_IRQ_FINI_NDX(GT, 1);
	GEN8_IRQ_FINI_NDX(GT, 2);
	GEN8_IRQ_FINI_NDX(GT, 3);

	GEN8_IRQ_FINI(PCU);

#undef GEN8_IRQ_FINI
#undef GEN8_IRQ_FINI_NDX

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	I915_WRITE(VLV_IIR, 0xffffffff);
	POSTING_READ(VLV_IIR);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

static void i8xx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	unsigned long irqflags;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i915_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		 * to generate a spurious hotplug event about three
		 * seconds later. So just do it once.
		 */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

4479 | static irqreturn_t i965_irq_handler(int irq, void *arg) | |
4480 | { | |
4481 | struct drm_device *dev = arg; | |
4482 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4483 | u32 iir, new_iir; | |
4484 | u32 pipe_stats[I915_MAX_PIPES]; | |
4485 | unsigned long irqflags; | |
4486 | int ret = IRQ_NONE, pipe; | |
4487 | u32 flip_mask = | |
4488 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
4489 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
4490 | ||
4491 | iir = I915_READ(IIR); | |
4492 | ||
4493 | for (;;) { | |
4494 | bool irq_received = (iir & ~flip_mask) != 0; | |
4495 | bool blc_event = false; | |
4496 | ||
4497 | /* Can't rely on pipestat interrupt bit in iir as it might | |
4498 | * have been cleared after the pipestat interrupt was received. | |
4499 | * It doesn't set the bit in iir again, but it still produces | |
4500 | * interrupts (for non-MSI). | |
4501 | */ | |
4502 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
4503 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | |
4504 | i915_handle_error(dev, false, | |
4505 | "Command parser error, iir 0x%08x", | |
4506 | iir); | |
4507 | ||
4508 | for_each_pipe(dev_priv, pipe) { | |
4509 | int reg = PIPESTAT(pipe); | |
4510 | pipe_stats[pipe] = I915_READ(reg); | |
4511 | ||
4512 | /* | |
4513 | * Clear the PIPE*STAT regs before the IIR | |
4514 | */ | |
4515 | if (pipe_stats[pipe] & 0x8000ffff) { | |
4516 | I915_WRITE(reg, pipe_stats[pipe]); | |
4517 | irq_received = true; | |
4518 | } | |
4519 | } | |
4520 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
4521 | ||
4522 | if (!irq_received) | |
4523 | break; | |
4524 | ||
4525 | ret = IRQ_HANDLED; | |
4526 | ||
4527 | /* Consume port. Then clear IIR or we'll miss events */ | |
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

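/*
 * Teardown mirrors setup in reverse: first quiesce the hotplug logic,
 * then mask every interrupt source (HWSTAM, per-pipe PIPESTAT, IMR)
 * and disable generation via IER, and finally ack anything still
 * latched so no stale event fires once the handler is gone.
 */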
static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

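/*
 * Re-enable hotplug interrupts that were shut off after an interrupt
 * storm: any pin still marked HPD_DISABLED is flipped back to
 * HPD_ENABLED, its connectors are switched from polling back to HPD,
 * and the platform hpd_irq_setup hook reprograms the hardware. Runs
 * from the delayed hotplug_reenable_work item, so it must take a
 * runtime PM reference itself.
 */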
static void intel_hpd_irq_reenable(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}

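/*
 * One-time IRQ state setup, called before the handler is installed:
 * initialize the work items and timers, pick the RPS event mask and
 * the vblank counter implementation for this platform, and route the
 * driver's irq/vblank hooks to the matching per-generation code.
 */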
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	/* Haven't installed the IRQ handler yet */
	dev_priv->pm._irqs_disabled = true;

	/* Pick the hardware frame counter: gen2 has none, g4x and gen5+
	 * have a full 32 bit counter, everything in between only 24 bits.
	 */
	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev))
		dev->vblank_disable_immediate = true;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

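/*
 * (Re)initialize hotplug state: reset the per-pin storm statistics to
 * HPD_ENABLED, decide per connector whether to rely on HPD or fall
 * back to polling, and program the hotplug hardware. Also suitable
 * for restoring the interrupt registers after a resume, since it
 * starts from a clean slate each time.
 */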
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled &&
		    I915_HAS_HOTPLUG(dev) &&
		    intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this
	 * is just to make the assert_spin_locked checks happy.
	 */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Disable interrupts so we can allow runtime PM. */
void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev->driver->irq_uninstall(dev);
	dev_priv->pm._irqs_disabled = true;
}

/* Restore interrupts so we can recover from runtime PM. */
void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->pm._irqs_disabled = false;
	dev->driver->irq_preinstall(dev);
	dev->driver->irq_postinstall(dev);
}
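
/*
 * The two helpers above are meant to be used as a pair around a
 * runtime suspend/resume cycle. A minimal usage sketch (the actual
 * call sites live in the driver's runtime PM paths, not here):
 *
 *	intel_runtime_pm_disable_interrupts(dev);
 *	... device is powered down and back up ...
 *	intel_runtime_pm_restore_interrupts(dev);
 *
 * Note that restore runs the full preinstall/postinstall sequence, so
 * interrupt state is reprogrammed from scratch rather than saved and
 * restored.
 */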