]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/gpu/drm/i915/i915_irq.c
drm/i915: enable self-refresh on 965
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / i915 / i915_irq.c
CommitLineData
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
63eeaf38 29#include <linux/sysrq.h>
1da177e4
LT
30#include "drmP.h"
31#include "drm.h"
32#include "i915_drm.h"
33#include "i915_drv.h"
1c5d22f7 34#include "i915_trace.h"
79e53945 35#include "intel_drv.h"
1da177e4 36
1da177e4 37#define MAX_NOPID ((u32)~0)
1da177e4 38
7c463586
KP
39/**
40 * Interrupts that are always left unmasked.
41 *
42 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
43 * we leave them always unmasked in IMR and then control enabling them through
44 * PIPESTAT alone.
45 */
63eeaf38
JB
46#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
47 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
48 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
49 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
7c463586
KP
50
51/** Interrupts that we mask and unmask at runtime. */
52#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
53
79e53945
JB
54#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
55 PIPE_VBLANK_INTERRUPT_STATUS)
56
57#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
58 PIPE_VBLANK_INTERRUPT_ENABLE)
59
60#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
61 DRM_I915_VBLANK_PIPE_B)
62
036a4a7d
ZW
63void
64igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
65{
66 if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
67 dev_priv->gt_irq_mask_reg &= ~mask;
68 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
69 (void) I915_READ(GTIMR);
70 }
71}
72
73static inline void
74igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
75{
76 if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
77 dev_priv->gt_irq_mask_reg |= mask;
78 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
79 (void) I915_READ(GTIMR);
80 }
81}
82
83/* For display hotplug interrupt */
84void
85igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
86{
87 if ((dev_priv->irq_mask_reg & mask) != 0) {
88 dev_priv->irq_mask_reg &= ~mask;
89 I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
90 (void) I915_READ(DEIMR);
91 }
92}
93
94static inline void
95igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
96{
97 if ((dev_priv->irq_mask_reg & mask) != mask) {
98 dev_priv->irq_mask_reg |= mask;
99 I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
100 (void) I915_READ(DEIMR);
101 }
102}
103
8ee1c3db 104void
ed4cb414
EA
105i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
106{
107 if ((dev_priv->irq_mask_reg & mask) != 0) {
108 dev_priv->irq_mask_reg &= ~mask;
109 I915_WRITE(IMR, dev_priv->irq_mask_reg);
110 (void) I915_READ(IMR);
111 }
112}
113
114static inline void
115i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
116{
117 if ((dev_priv->irq_mask_reg & mask) != mask) {
118 dev_priv->irq_mask_reg |= mask;
119 I915_WRITE(IMR, dev_priv->irq_mask_reg);
120 (void) I915_READ(IMR);
121 }
122}
123
7c463586
KP
124static inline u32
125i915_pipestat(int pipe)
126{
127 if (pipe == 0)
128 return PIPEASTAT;
129 if (pipe == 1)
130 return PIPEBSTAT;
9c84ba4e 131 BUG();
7c463586
KP
132}
133
134void
135i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
136{
137 if ((dev_priv->pipestat[pipe] & mask) != mask) {
138 u32 reg = i915_pipestat(pipe);
139
140 dev_priv->pipestat[pipe] |= mask;
141 /* Enable the interrupt, clear any pending status */
142 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
143 (void) I915_READ(reg);
144 }
145}
146
147void
148i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
149{
150 if ((dev_priv->pipestat[pipe] & mask) != 0) {
151 u32 reg = i915_pipestat(pipe);
152
153 dev_priv->pipestat[pipe] &= ~mask;
154 I915_WRITE(reg, dev_priv->pipestat[pipe]);
155 (void) I915_READ(reg);
156 }
157}
158
0a3e67a4
JB
159/**
160 * i915_pipe_enabled - check if a pipe is enabled
161 * @dev: DRM device
162 * @pipe: pipe to check
163 *
164 * Reading certain registers when the pipe is disabled can hang the chip.
165 * Use this routine to make sure the PLL is running and the pipe is active
166 * before reading such registers if unsure.
167 */
168static int
169i915_pipe_enabled(struct drm_device *dev, int pipe)
170{
171 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
172 unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
173
174 if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
175 return 1;
176
177 return 0;
178}
179
42f52ef8
KP
180/* Called from drm generic code, passed a 'crtc', which
181 * we use as a pipe index
182 */
183u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
0a3e67a4
JB
184{
185 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
186 unsigned long high_frame;
187 unsigned long low_frame;
188 u32 high1, high2, low, count;
0a3e67a4 189
0a3e67a4
JB
190 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
191 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
192
193 if (!i915_pipe_enabled(dev, pipe)) {
44d98a61
ZY
194 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
195 "pipe %d\n", pipe);
0a3e67a4
JB
196 return 0;
197 }
198
199 /*
200 * High & low register fields aren't synchronized, so make sure
201 * we get a low value that's stable across two reads of the high
202 * register.
203 */
204 do {
205 high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
206 PIPE_FRAME_HIGH_SHIFT);
207 low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
208 PIPE_FRAME_LOW_SHIFT);
209 high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
210 PIPE_FRAME_HIGH_SHIFT);
211 } while (high1 != high2);
212
213 count = (high1 << 8) | low;
214
215 return count;
216}
217
9880b7a5
JB
218u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
219{
220 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
221 int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
222
223 if (!i915_pipe_enabled(dev, pipe)) {
44d98a61
ZY
224 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
225 "pipe %d\n", pipe);
9880b7a5
JB
226 return 0;
227 }
228
229 return I915_READ(reg);
230}
231
5ca58282
JB
232/*
233 * Handle hotplug events outside the interrupt handler proper.
234 */
235static void i915_hotplug_work_func(struct work_struct *work)
236{
237 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
238 hotplug_work);
239 struct drm_device *dev = dev_priv->dev;
c31c4ba3
KP
240 struct drm_mode_config *mode_config = &dev->mode_config;
241 struct drm_connector *connector;
242
243 if (mode_config->num_connector) {
244 list_for_each_entry(connector, &mode_config->connector_list, head) {
245 struct intel_output *intel_output = to_intel_output(connector);
246
247 if (intel_output->hot_plug)
248 (*intel_output->hot_plug) (intel_output);
249 }
250 }
5ca58282
JB
251 /* Just fire off a uevent and let userspace tell us what to do */
252 drm_sysfs_hotplug_event(dev);
253}
254
036a4a7d
ZW
255irqreturn_t igdng_irq_handler(struct drm_device *dev)
256{
257 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
258 int ret = IRQ_NONE;
259 u32 de_iir, gt_iir;
260 u32 new_de_iir, new_gt_iir;
261 struct drm_i915_master_private *master_priv;
262
263 de_iir = I915_READ(DEIIR);
264 gt_iir = I915_READ(GTIIR);
265
266 for (;;) {
267 if (de_iir == 0 && gt_iir == 0)
268 break;
269
270 ret = IRQ_HANDLED;
271
272 I915_WRITE(DEIIR, de_iir);
273 new_de_iir = I915_READ(DEIIR);
274 I915_WRITE(GTIIR, gt_iir);
275 new_gt_iir = I915_READ(GTIIR);
276
277 if (dev->primary->master) {
278 master_priv = dev->primary->master->driver_priv;
279 if (master_priv->sarea_priv)
280 master_priv->sarea_priv->last_dispatch =
281 READ_BREADCRUMB(dev_priv);
282 }
283
284 if (gt_iir & GT_USER_INTERRUPT) {
1c5d22f7
CW
285 u32 seqno = i915_get_gem_seqno(dev);
286 dev_priv->mm.irq_gem_seqno = seqno;
287 trace_i915_gem_request_complete(dev, seqno);
036a4a7d
ZW
288 DRM_WAKEUP(&dev_priv->irq_queue);
289 }
290
291 de_iir = new_de_iir;
292 gt_iir = new_gt_iir;
293 }
294
295 return ret;
296}
297
8a905236
JB
298/**
299 * i915_error_work_func - do process context error handling work
300 * @work: work struct
301 *
302 * Fire an error uevent so userspace can see that a hang or error
303 * was detected.
304 */
305static void i915_error_work_func(struct work_struct *work)
306{
307 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
308 error_work);
309 struct drm_device *dev = dev_priv->dev;
f316a42c
BG
310 char *error_event[] = { "ERROR=1", NULL };
311 char *reset_event[] = { "RESET=1", NULL };
312 char *reset_done_event[] = { "ERROR=0", NULL };
8a905236 313
44d98a61 314 DRM_DEBUG_DRIVER("generating error event\n");
f316a42c
BG
315 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
316
ba1234d1 317 if (atomic_read(&dev_priv->mm.wedged)) {
f316a42c 318 if (IS_I965G(dev)) {
44d98a61 319 DRM_DEBUG_DRIVER("resetting chip\n");
f316a42c
BG
320 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
321 if (!i965_reset(dev, GDRST_RENDER)) {
ba1234d1 322 atomic_set(&dev_priv->mm.wedged, 0);
f316a42c
BG
323 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
324 }
325 } else {
44d98a61 326 DRM_DEBUG_DRIVER("reboot required\n");
f316a42c
BG
327 }
328 }
8a905236
JB
329}
330
331/**
332 * i915_capture_error_state - capture an error record for later analysis
333 * @dev: drm device
334 *
335 * Should be called when an error is detected (either a hang or an error
336 * interrupt) to capture error state from the time of the error. Fills
337 * out a structure which becomes available in debugfs for user level tools
338 * to pick up.
339 */
63eeaf38
JB
340static void i915_capture_error_state(struct drm_device *dev)
341{
342 struct drm_i915_private *dev_priv = dev->dev_private;
343 struct drm_i915_error_state *error;
344 unsigned long flags;
345
346 spin_lock_irqsave(&dev_priv->error_lock, flags);
347 if (dev_priv->first_error)
348 goto out;
349
350 error = kmalloc(sizeof(*error), GFP_ATOMIC);
351 if (!error) {
44d98a61 352 DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n");
63eeaf38
JB
353 goto out;
354 }
355
356 error->eir = I915_READ(EIR);
357 error->pgtbl_er = I915_READ(PGTBL_ER);
358 error->pipeastat = I915_READ(PIPEASTAT);
359 error->pipebstat = I915_READ(PIPEBSTAT);
360 error->instpm = I915_READ(INSTPM);
361 if (!IS_I965G(dev)) {
362 error->ipeir = I915_READ(IPEIR);
363 error->ipehr = I915_READ(IPEHR);
364 error->instdone = I915_READ(INSTDONE);
365 error->acthd = I915_READ(ACTHD);
366 } else {
367 error->ipeir = I915_READ(IPEIR_I965);
368 error->ipehr = I915_READ(IPEHR_I965);
369 error->instdone = I915_READ(INSTDONE_I965);
370 error->instps = I915_READ(INSTPS);
371 error->instdone1 = I915_READ(INSTDONE1);
372 error->acthd = I915_READ(ACTHD_I965);
373 }
374
8a905236
JB
375 do_gettimeofday(&error->time);
376
63eeaf38
JB
377 dev_priv->first_error = error;
378
379out:
380 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
381}
382
8a905236
JB
383/**
384 * i915_handle_error - handle an error interrupt
385 * @dev: drm device
386 *
387 * Do some basic checking of regsiter state at error interrupt time and
388 * dump it to the syslog. Also call i915_capture_error_state() to make
389 * sure we get a record and make it available in debugfs. Fire a uevent
390 * so userspace knows something bad happened (should trigger collection
391 * of a ring dump etc.).
392 */
ba1234d1 393static void i915_handle_error(struct drm_device *dev, bool wedged)
8a905236
JB
394{
395 struct drm_i915_private *dev_priv = dev->dev_private;
396 u32 eir = I915_READ(EIR);
397 u32 pipea_stats = I915_READ(PIPEASTAT);
398 u32 pipeb_stats = I915_READ(PIPEBSTAT);
399
400 i915_capture_error_state(dev);
401
402 printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
403 eir);
404
405 if (IS_G4X(dev)) {
406 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
407 u32 ipeir = I915_READ(IPEIR_I965);
408
409 printk(KERN_ERR " IPEIR: 0x%08x\n",
410 I915_READ(IPEIR_I965));
411 printk(KERN_ERR " IPEHR: 0x%08x\n",
412 I915_READ(IPEHR_I965));
413 printk(KERN_ERR " INSTDONE: 0x%08x\n",
414 I915_READ(INSTDONE_I965));
415 printk(KERN_ERR " INSTPS: 0x%08x\n",
416 I915_READ(INSTPS));
417 printk(KERN_ERR " INSTDONE1: 0x%08x\n",
418 I915_READ(INSTDONE1));
419 printk(KERN_ERR " ACTHD: 0x%08x\n",
420 I915_READ(ACTHD_I965));
421 I915_WRITE(IPEIR_I965, ipeir);
422 (void)I915_READ(IPEIR_I965);
423 }
424 if (eir & GM45_ERROR_PAGE_TABLE) {
425 u32 pgtbl_err = I915_READ(PGTBL_ER);
426 printk(KERN_ERR "page table error\n");
427 printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
428 pgtbl_err);
429 I915_WRITE(PGTBL_ER, pgtbl_err);
430 (void)I915_READ(PGTBL_ER);
431 }
432 }
433
434 if (IS_I9XX(dev)) {
435 if (eir & I915_ERROR_PAGE_TABLE) {
436 u32 pgtbl_err = I915_READ(PGTBL_ER);
437 printk(KERN_ERR "page table error\n");
438 printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
439 pgtbl_err);
440 I915_WRITE(PGTBL_ER, pgtbl_err);
441 (void)I915_READ(PGTBL_ER);
442 }
443 }
444
445 if (eir & I915_ERROR_MEMORY_REFRESH) {
446 printk(KERN_ERR "memory refresh error\n");
447 printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
448 pipea_stats);
449 printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
450 pipeb_stats);
451 /* pipestat has already been acked */
452 }
453 if (eir & I915_ERROR_INSTRUCTION) {
454 printk(KERN_ERR "instruction error\n");
455 printk(KERN_ERR " INSTPM: 0x%08x\n",
456 I915_READ(INSTPM));
457 if (!IS_I965G(dev)) {
458 u32 ipeir = I915_READ(IPEIR);
459
460 printk(KERN_ERR " IPEIR: 0x%08x\n",
461 I915_READ(IPEIR));
462 printk(KERN_ERR " IPEHR: 0x%08x\n",
463 I915_READ(IPEHR));
464 printk(KERN_ERR " INSTDONE: 0x%08x\n",
465 I915_READ(INSTDONE));
466 printk(KERN_ERR " ACTHD: 0x%08x\n",
467 I915_READ(ACTHD));
468 I915_WRITE(IPEIR, ipeir);
469 (void)I915_READ(IPEIR);
470 } else {
471 u32 ipeir = I915_READ(IPEIR_I965);
472
473 printk(KERN_ERR " IPEIR: 0x%08x\n",
474 I915_READ(IPEIR_I965));
475 printk(KERN_ERR " IPEHR: 0x%08x\n",
476 I915_READ(IPEHR_I965));
477 printk(KERN_ERR " INSTDONE: 0x%08x\n",
478 I915_READ(INSTDONE_I965));
479 printk(KERN_ERR " INSTPS: 0x%08x\n",
480 I915_READ(INSTPS));
481 printk(KERN_ERR " INSTDONE1: 0x%08x\n",
482 I915_READ(INSTDONE1));
483 printk(KERN_ERR " ACTHD: 0x%08x\n",
484 I915_READ(ACTHD_I965));
485 I915_WRITE(IPEIR_I965, ipeir);
486 (void)I915_READ(IPEIR_I965);
487 }
488 }
489
490 I915_WRITE(EIR, eir);
491 (void)I915_READ(EIR);
492 eir = I915_READ(EIR);
493 if (eir) {
494 /*
495 * some errors might have become stuck,
496 * mask them.
497 */
498 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
499 I915_WRITE(EMR, I915_READ(EMR) | eir);
500 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
501 }
502
ba1234d1
BG
503 if (wedged) {
504 atomic_set(&dev_priv->mm.wedged, 1);
505
11ed50ec
BG
506 /*
507 * Wakeup waiting processes so they don't hang
508 */
509 printk("i915: Waking up sleeping processes\n");
510 DRM_WAKEUP(&dev_priv->irq_queue);
511 }
512
9c9fe1f8 513 queue_work(dev_priv->wq, &dev_priv->error_work);
8a905236
JB
514}
515
1da177e4
LT
516irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
517{
84b1fd10 518 struct drm_device *dev = (struct drm_device *) arg;
1da177e4 519 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
7c1c2871 520 struct drm_i915_master_private *master_priv;
cdfbc41f
EA
521 u32 iir, new_iir;
522 u32 pipea_stats, pipeb_stats;
05eff845
KP
523 u32 vblank_status;
524 u32 vblank_enable;
0a3e67a4 525 int vblank = 0;
7c463586 526 unsigned long irqflags;
05eff845
KP
527 int irq_received;
528 int ret = IRQ_NONE;
6e5fca53 529
630681d9
EA
530 atomic_inc(&dev_priv->irq_received);
531
036a4a7d
ZW
532 if (IS_IGDNG(dev))
533 return igdng_irq_handler(dev);
534
ed4cb414 535 iir = I915_READ(IIR);
a6b54f3f 536
05eff845
KP
537 if (IS_I965G(dev)) {
538 vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
539 vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
540 } else {
541 vblank_status = I915_VBLANK_INTERRUPT_STATUS;
542 vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
543 }
af6061af 544
05eff845
KP
545 for (;;) {
546 irq_received = iir != 0;
547
548 /* Can't rely on pipestat interrupt bit in iir as it might
549 * have been cleared after the pipestat interrupt was received.
550 * It doesn't set the bit in iir again, but it still produces
551 * interrupts (for non-MSI).
552 */
553 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
554 pipea_stats = I915_READ(PIPEASTAT);
555 pipeb_stats = I915_READ(PIPEBSTAT);
79e53945 556
8a905236 557 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
ba1234d1 558 i915_handle_error(dev, false);
8a905236 559
cdfbc41f
EA
560 /*
561 * Clear the PIPE(A|B)STAT regs before the IIR
562 */
05eff845 563 if (pipea_stats & 0x8000ffff) {
7662c8bd 564 if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
44d98a61 565 DRM_DEBUG_DRIVER("pipe a underrun\n");
cdfbc41f 566 I915_WRITE(PIPEASTAT, pipea_stats);
05eff845 567 irq_received = 1;
cdfbc41f 568 }
1da177e4 569
05eff845 570 if (pipeb_stats & 0x8000ffff) {
7662c8bd 571 if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
44d98a61 572 DRM_DEBUG_DRIVER("pipe b underrun\n");
cdfbc41f 573 I915_WRITE(PIPEBSTAT, pipeb_stats);
05eff845 574 irq_received = 1;
cdfbc41f 575 }
05eff845
KP
576 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
577
578 if (!irq_received)
579 break;
580
581 ret = IRQ_HANDLED;
8ee1c3db 582
5ca58282
JB
583 /* Consume port. Then clear IIR or we'll miss events */
584 if ((I915_HAS_HOTPLUG(dev)) &&
585 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
586 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
587
44d98a61 588 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
5ca58282
JB
589 hotplug_status);
590 if (hotplug_status & dev_priv->hotplug_supported_mask)
9c9fe1f8
EA
591 queue_work(dev_priv->wq,
592 &dev_priv->hotplug_work);
5ca58282
JB
593
594 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
595 I915_READ(PORT_HOTPLUG_STAT);
04302965
SL
596
597 /* EOS interrupts occurs */
598 if (IS_IGD(dev) &&
599 (hotplug_status & CRT_EOS_INT_STATUS)) {
600 u32 temp;
601
44d98a61 602 DRM_DEBUG_DRIVER("EOS interrupt occurs\n");
04302965
SL
603 /* status is already cleared */
604 temp = I915_READ(ADPA);
605 temp &= ~ADPA_DAC_ENABLE;
606 I915_WRITE(ADPA, temp);
607
608 temp = I915_READ(PORT_HOTPLUG_EN);
609 temp &= ~CRT_EOS_INT_EN;
610 I915_WRITE(PORT_HOTPLUG_EN, temp);
611
612 temp = I915_READ(PORT_HOTPLUG_STAT);
613 if (temp & CRT_EOS_INT_STATUS)
614 I915_WRITE(PORT_HOTPLUG_STAT,
615 CRT_EOS_INT_STATUS);
616 }
5ca58282
JB
617 }
618
cdfbc41f
EA
619 I915_WRITE(IIR, iir);
620 new_iir = I915_READ(IIR); /* Flush posted writes */
7c463586 621
7c1c2871
DA
622 if (dev->primary->master) {
623 master_priv = dev->primary->master->driver_priv;
624 if (master_priv->sarea_priv)
625 master_priv->sarea_priv->last_dispatch =
626 READ_BREADCRUMB(dev_priv);
627 }
0a3e67a4 628
cdfbc41f 629 if (iir & I915_USER_INTERRUPT) {
1c5d22f7
CW
630 u32 seqno = i915_get_gem_seqno(dev);
631 dev_priv->mm.irq_gem_seqno = seqno;
632 trace_i915_gem_request_complete(dev, seqno);
cdfbc41f 633 DRM_WAKEUP(&dev_priv->irq_queue);
f65d9421
BG
634 dev_priv->hangcheck_count = 0;
635 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
cdfbc41f 636 }
673a394b 637
05eff845 638 if (pipea_stats & vblank_status) {
cdfbc41f
EA
639 vblank++;
640 drm_handle_vblank(dev, 0);
641 }
7c463586 642
05eff845 643 if (pipeb_stats & vblank_status) {
cdfbc41f
EA
644 vblank++;
645 drm_handle_vblank(dev, 1);
646 }
7c463586 647
cdfbc41f
EA
648 if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
649 (iir & I915_ASLE_INTERRUPT))
650 opregion_asle_intr(dev);
651
652 /* With MSI, interrupts are only generated when iir
653 * transitions from zero to nonzero. If another bit got
654 * set while we were handling the existing iir bits, then
655 * we would never get another interrupt.
656 *
657 * This is fine on non-MSI as well, as if we hit this path
658 * we avoid exiting the interrupt handler only to generate
659 * another one.
660 *
661 * Note that for MSI this could cause a stray interrupt report
662 * if an interrupt landed in the time between writing IIR and
663 * the posting read. This should be rare enough to never
664 * trigger the 99% of 100,000 interrupts test for disabling
665 * stray interrupts.
666 */
667 iir = new_iir;
05eff845 668 }
0a3e67a4 669
05eff845 670 return ret;
1da177e4
LT
671}
672
af6061af 673static int i915_emit_irq(struct drm_device * dev)
1da177e4
LT
674{
675 drm_i915_private_t *dev_priv = dev->dev_private;
7c1c2871 676 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1da177e4
LT
677 RING_LOCALS;
678
679 i915_kernel_lost_context(dev);
680
44d98a61 681 DRM_DEBUG_DRIVER("\n");
1da177e4 682
c99b058f 683 dev_priv->counter++;
c29b669c 684 if (dev_priv->counter > 0x7FFFFFFFUL)
c99b058f 685 dev_priv->counter = 1;
7c1c2871
DA
686 if (master_priv->sarea_priv)
687 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
c29b669c 688
0baf823a 689 BEGIN_LP_RING(4);
585fb111 690 OUT_RING(MI_STORE_DWORD_INDEX);
0baf823a 691 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
c29b669c 692 OUT_RING(dev_priv->counter);
585fb111 693 OUT_RING(MI_USER_INTERRUPT);
1da177e4 694 ADVANCE_LP_RING();
bc5f4523 695
c29b669c 696 return dev_priv->counter;
1da177e4
LT
697}
698
673a394b 699void i915_user_irq_get(struct drm_device *dev)
ed4cb414
EA
700{
701 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
e9d21d7f 702 unsigned long irqflags;
ed4cb414 703
e9d21d7f 704 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
036a4a7d
ZW
705 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
706 if (IS_IGDNG(dev))
707 igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
708 else
709 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
710 }
e9d21d7f 711 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
ed4cb414
EA
712}
713
0a3e67a4 714void i915_user_irq_put(struct drm_device *dev)
ed4cb414
EA
715{
716 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
e9d21d7f 717 unsigned long irqflags;
ed4cb414 718
e9d21d7f 719 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
ed4cb414 720 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
036a4a7d
ZW
721 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
722 if (IS_IGDNG(dev))
723 igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
724 else
725 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
726 }
e9d21d7f 727 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
ed4cb414
EA
728}
729
9d34e5db
CW
730void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
731{
732 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
733
734 if (dev_priv->trace_irq_seqno == 0)
735 i915_user_irq_get(dev);
736
737 dev_priv->trace_irq_seqno = seqno;
738}
739
84b1fd10 740static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1da177e4
LT
741{
742 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
7c1c2871 743 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1da177e4
LT
744 int ret = 0;
745
44d98a61 746 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
1da177e4
LT
747 READ_BREADCRUMB(dev_priv));
748
ed4cb414 749 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
7c1c2871
DA
750 if (master_priv->sarea_priv)
751 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
1da177e4 752 return 0;
ed4cb414 753 }
1da177e4 754
7c1c2871
DA
755 if (master_priv->sarea_priv)
756 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1da177e4 757
ed4cb414 758 i915_user_irq_get(dev);
1da177e4
LT
759 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
760 READ_BREADCRUMB(dev_priv) >= irq_nr);
ed4cb414 761 i915_user_irq_put(dev);
1da177e4 762
20caafa6 763 if (ret == -EBUSY) {
3e684eae 764 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
1da177e4
LT
765 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
766 }
767
af6061af
DA
768 return ret;
769}
770
1da177e4
LT
771/* Needs the lock as it touches the ring.
772 */
c153f45f
EA
773int i915_irq_emit(struct drm_device *dev, void *data,
774 struct drm_file *file_priv)
1da177e4 775{
1da177e4 776 drm_i915_private_t *dev_priv = dev->dev_private;
c153f45f 777 drm_i915_irq_emit_t *emit = data;
1da177e4
LT
778 int result;
779
07f4f8bf 780 if (!dev_priv || !dev_priv->ring.virtual_start) {
3e684eae 781 DRM_ERROR("called with no initialization\n");
20caafa6 782 return -EINVAL;
1da177e4 783 }
299eb93c
EA
784
785 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
786
546b0974 787 mutex_lock(&dev->struct_mutex);
1da177e4 788 result = i915_emit_irq(dev);
546b0974 789 mutex_unlock(&dev->struct_mutex);
1da177e4 790
c153f45f 791 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
1da177e4 792 DRM_ERROR("copy_to_user\n");
20caafa6 793 return -EFAULT;
1da177e4
LT
794 }
795
796 return 0;
797}
798
799/* Doesn't need the hardware lock.
800 */
c153f45f
EA
801int i915_irq_wait(struct drm_device *dev, void *data,
802 struct drm_file *file_priv)
1da177e4 803{
1da177e4 804 drm_i915_private_t *dev_priv = dev->dev_private;
c153f45f 805 drm_i915_irq_wait_t *irqwait = data;
1da177e4
LT
806
807 if (!dev_priv) {
3e684eae 808 DRM_ERROR("called with no initialization\n");
20caafa6 809 return -EINVAL;
1da177e4
LT
810 }
811
c153f45f 812 return i915_wait_irq(dev, irqwait->irq_seq);
1da177e4
LT
813}
814
42f52ef8
KP
815/* Called from drm generic code, passed 'crtc' which
816 * we use as a pipe index
817 */
818int i915_enable_vblank(struct drm_device *dev, int pipe)
0a3e67a4
JB
819{
820 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
e9d21d7f 821 unsigned long irqflags;
71e0ffa5
JB
822 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
823 u32 pipeconf;
824
825 pipeconf = I915_READ(pipeconf_reg);
826 if (!(pipeconf & PIPEACONF_ENABLE))
827 return -EINVAL;
0a3e67a4 828
036a4a7d
ZW
829 if (IS_IGDNG(dev))
830 return 0;
831
e9d21d7f 832 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
e9d21d7f 833 if (IS_I965G(dev))
7c463586
KP
834 i915_enable_pipestat(dev_priv, pipe,
835 PIPE_START_VBLANK_INTERRUPT_ENABLE);
e9d21d7f 836 else
7c463586
KP
837 i915_enable_pipestat(dev_priv, pipe,
838 PIPE_VBLANK_INTERRUPT_ENABLE);
e9d21d7f 839 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
0a3e67a4
JB
840 return 0;
841}
842
42f52ef8
KP
843/* Called from drm generic code, passed 'crtc' which
844 * we use as a pipe index
845 */
846void i915_disable_vblank(struct drm_device *dev, int pipe)
0a3e67a4
JB
847{
848 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
e9d21d7f 849 unsigned long irqflags;
0a3e67a4 850
036a4a7d
ZW
851 if (IS_IGDNG(dev))
852 return;
853
e9d21d7f 854 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
7c463586
KP
855 i915_disable_pipestat(dev_priv, pipe,
856 PIPE_VBLANK_INTERRUPT_ENABLE |
857 PIPE_START_VBLANK_INTERRUPT_ENABLE);
e9d21d7f 858 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
0a3e67a4
JB
859}
860
79e53945
JB
861void i915_enable_interrupt (struct drm_device *dev)
862{
863 struct drm_i915_private *dev_priv = dev->dev_private;
e170b030
ZW
864
865 if (!IS_IGDNG(dev))
866 opregion_enable_asle(dev);
79e53945
JB
867 dev_priv->irq_enabled = 1;
868}
869
870
702880f2
DA
871/* Set the vblank monitor pipe
872 */
c153f45f
EA
873int i915_vblank_pipe_set(struct drm_device *dev, void *data,
874 struct drm_file *file_priv)
702880f2 875{
702880f2 876 drm_i915_private_t *dev_priv = dev->dev_private;
702880f2
DA
877
878 if (!dev_priv) {
3e684eae 879 DRM_ERROR("called with no initialization\n");
20caafa6 880 return -EINVAL;
702880f2
DA
881 }
882
5b51694a 883 return 0;
702880f2
DA
884}
885
c153f45f
EA
886int i915_vblank_pipe_get(struct drm_device *dev, void *data,
887 struct drm_file *file_priv)
702880f2 888{
702880f2 889 drm_i915_private_t *dev_priv = dev->dev_private;
c153f45f 890 drm_i915_vblank_pipe_t *pipe = data;
702880f2
DA
891
892 if (!dev_priv) {
3e684eae 893 DRM_ERROR("called with no initialization\n");
20caafa6 894 return -EINVAL;
702880f2
DA
895 }
896
0a3e67a4 897 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
c153f45f 898
702880f2
DA
899 return 0;
900}
901
a6b54f3f
MD
902/**
903 * Schedule buffer swap at given vertical blank.
904 */
c153f45f
EA
905int i915_vblank_swap(struct drm_device *dev, void *data,
906 struct drm_file *file_priv)
a6b54f3f 907{
bd95e0a4
EA
908 /* The delayed swap mechanism was fundamentally racy, and has been
909 * removed. The model was that the client requested a delayed flip/swap
910 * from the kernel, then waited for vblank before continuing to perform
911 * rendering. The problem was that the kernel might wake the client
912 * up before it dispatched the vblank swap (since the lock has to be
913 * held while touching the ringbuffer), in which case the client would
914 * clear and start the next frame before the swap occurred, and
915 * flicker would occur in addition to likely missing the vblank.
916 *
917 * In the absence of this ioctl, userland falls back to a correct path
918 * of waiting for a vblank, then dispatching the swap on its own.
919 * Context switching to userland and back is plenty fast enough for
920 * meeting the requirements of vblank swapping.
0a3e67a4 921 */
bd95e0a4 922 return -EINVAL;
a6b54f3f
MD
923}
924
f65d9421
BG
925struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
926 drm_i915_private_t *dev_priv = dev->dev_private;
927 return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
928}
929
930/**
931 * This is called when the chip hasn't reported back with completed
932 * batchbuffers in a long time. The first time this is called we simply record
933 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
934 * again, we assume the chip is wedged and try to fix it.
935 */
936void i915_hangcheck_elapsed(unsigned long data)
937{
938 struct drm_device *dev = (struct drm_device *)data;
939 drm_i915_private_t *dev_priv = dev->dev_private;
940 uint32_t acthd;
941
942 if (!IS_I965G(dev))
943 acthd = I915_READ(ACTHD);
944 else
945 acthd = I915_READ(ACTHD_I965);
946
947 /* If all work is done then ACTHD clearly hasn't advanced. */
948 if (list_empty(&dev_priv->mm.request_list) ||
949 i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
950 dev_priv->hangcheck_count = 0;
951 return;
952 }
953
954 if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
955 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
ba1234d1 956 i915_handle_error(dev, true);
f65d9421
BG
957 return;
958 }
959
960 /* Reset timer case chip hangs without another request being added */
961 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
962
963 if (acthd != dev_priv->last_acthd)
964 dev_priv->hangcheck_count = 0;
965 else
966 dev_priv->hangcheck_count++;
967
968 dev_priv->last_acthd = acthd;
969}
970
1da177e4
LT
971/* drm_dma.h hooks
972*/
036a4a7d
ZW
973static void igdng_irq_preinstall(struct drm_device *dev)
974{
975 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
976
977 I915_WRITE(HWSTAM, 0xeffe);
978
979 /* XXX hotplug from PCH */
980
981 I915_WRITE(DEIMR, 0xffffffff);
982 I915_WRITE(DEIER, 0x0);
983 (void) I915_READ(DEIER);
984
985 /* and GT */
986 I915_WRITE(GTIMR, 0xffffffff);
987 I915_WRITE(GTIER, 0x0);
988 (void) I915_READ(GTIER);
989}
990
991static int igdng_irq_postinstall(struct drm_device *dev)
992{
993 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
994 /* enable kind of interrupts always enabled */
995 u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
996 u32 render_mask = GT_USER_INTERRUPT;
997
998 dev_priv->irq_mask_reg = ~display_mask;
999 dev_priv->de_irq_enable_reg = display_mask;
1000
1001 /* should always can generate irq */
1002 I915_WRITE(DEIIR, I915_READ(DEIIR));
1003 I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
1004 I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
1005 (void) I915_READ(DEIER);
1006
1007 /* user interrupt should be enabled, but masked initial */
1008 dev_priv->gt_irq_mask_reg = 0xffffffff;
1009 dev_priv->gt_irq_enable_reg = render_mask;
1010
1011 I915_WRITE(GTIIR, I915_READ(GTIIR));
1012 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
1013 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
1014 (void) I915_READ(GTIER);
1015
1016 return 0;
1017}
1018
84b1fd10 1019void i915_driver_irq_preinstall(struct drm_device * dev)
1da177e4
LT
1020{
1021 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1022
79e53945
JB
1023 atomic_set(&dev_priv->irq_received, 0);
1024
036a4a7d 1025 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
8a905236 1026 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
036a4a7d
ZW
1027
1028 if (IS_IGDNG(dev)) {
1029 igdng_irq_preinstall(dev);
1030 return;
1031 }
1032
5ca58282
JB
1033 if (I915_HAS_HOTPLUG(dev)) {
1034 I915_WRITE(PORT_HOTPLUG_EN, 0);
1035 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1036 }
1037
0a3e67a4 1038 I915_WRITE(HWSTAM, 0xeffe);
7c463586
KP
1039 I915_WRITE(PIPEASTAT, 0);
1040 I915_WRITE(PIPEBSTAT, 0);
0a3e67a4 1041 I915_WRITE(IMR, 0xffffffff);
ed4cb414 1042 I915_WRITE(IER, 0x0);
7c463586 1043 (void) I915_READ(IER);
1da177e4
LT
1044}
1045
0a3e67a4 1046int i915_driver_irq_postinstall(struct drm_device *dev)
1da177e4
LT
1047{
1048 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
5ca58282 1049 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
63eeaf38 1050 u32 error_mask;
0a3e67a4 1051
036a4a7d
ZW
1052 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
1053
0a3e67a4 1054 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
0a3e67a4 1055
036a4a7d
ZW
1056 if (IS_IGDNG(dev))
1057 return igdng_irq_postinstall(dev);
1058
7c463586
KP
1059 /* Unmask the interrupts that we always want on. */
1060 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
1061
1062 dev_priv->pipestat[0] = 0;
1063 dev_priv->pipestat[1] = 0;
1064
5ca58282
JB
1065 if (I915_HAS_HOTPLUG(dev)) {
1066 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1067
1068 /* Leave other bits alone */
1069 hotplug_en |= HOTPLUG_EN_MASK;
1070 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1071
1072 dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
1073 TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
1074 SDVOB_HOTPLUG_INT_STATUS;
1075 if (IS_G4X(dev)) {
1076 dev_priv->hotplug_supported_mask |=
1077 HDMIB_HOTPLUG_INT_STATUS |
1078 HDMIC_HOTPLUG_INT_STATUS |
1079 HDMID_HOTPLUG_INT_STATUS;
1080 }
1081 /* Enable in IER... */
1082 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1083 /* and unmask in IMR */
1084 i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT);
1085 }
1086
63eeaf38
JB
1087 /*
1088 * Enable some error detection, note the instruction error mask
1089 * bit is reserved, so we leave it masked.
1090 */
1091 if (IS_G4X(dev)) {
1092 error_mask = ~(GM45_ERROR_PAGE_TABLE |
1093 GM45_ERROR_MEM_PRIV |
1094 GM45_ERROR_CP_PRIV |
1095 I915_ERROR_MEMORY_REFRESH);
1096 } else {
1097 error_mask = ~(I915_ERROR_PAGE_TABLE |
1098 I915_ERROR_MEMORY_REFRESH);
1099 }
1100 I915_WRITE(EMR, error_mask);
1101
7c463586
KP
1102 /* Disable pipe interrupt enables, clear pending pipe status */
1103 I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
1104 I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
1105 /* Clear pending interrupt status */
1106 I915_WRITE(IIR, I915_READ(IIR));
8ee1c3db 1107
5ca58282 1108 I915_WRITE(IER, enable_mask);
7c463586 1109 I915_WRITE(IMR, dev_priv->irq_mask_reg);
ed4cb414
EA
1110 (void) I915_READ(IER);
1111
8ee1c3db 1112 opregion_enable_asle(dev);
0a3e67a4
JB
1113
1114 return 0;
1da177e4
LT
1115}
1116
036a4a7d
ZW
1117static void igdng_irq_uninstall(struct drm_device *dev)
1118{
1119 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1120 I915_WRITE(HWSTAM, 0xffffffff);
1121
1122 I915_WRITE(DEIMR, 0xffffffff);
1123 I915_WRITE(DEIER, 0x0);
1124 I915_WRITE(DEIIR, I915_READ(DEIIR));
1125
1126 I915_WRITE(GTIMR, 0xffffffff);
1127 I915_WRITE(GTIER, 0x0);
1128 I915_WRITE(GTIIR, I915_READ(GTIIR));
1129}
1130
84b1fd10 1131void i915_driver_irq_uninstall(struct drm_device * dev)
1da177e4
LT
1132{
1133 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
91e3738e 1134
1da177e4
LT
1135 if (!dev_priv)
1136 return;
1137
0a3e67a4
JB
1138 dev_priv->vblank_pipe = 0;
1139
036a4a7d
ZW
1140 if (IS_IGDNG(dev)) {
1141 igdng_irq_uninstall(dev);
1142 return;
1143 }
1144
5ca58282
JB
1145 if (I915_HAS_HOTPLUG(dev)) {
1146 I915_WRITE(PORT_HOTPLUG_EN, 0);
1147 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1148 }
1149
0a3e67a4 1150 I915_WRITE(HWSTAM, 0xffffffff);
7c463586
KP
1151 I915_WRITE(PIPEASTAT, 0);
1152 I915_WRITE(PIPEBSTAT, 0);
0a3e67a4 1153 I915_WRITE(IMR, 0xffffffff);
ed4cb414 1154 I915_WRITE(IER, 0x0);
af6061af 1155
7c463586
KP
1156 I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
1157 I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
1158 I915_WRITE(IIR, I915_READ(IIR));
1da177e4 1159}