/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/sysrq.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define MAX_NOPID ((u32)~0)

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
				   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
				   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
				   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)

void
igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
		dev_priv->gt_irq_mask_reg &= ~mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
		(void) I915_READ(GTIMR);
	}
}

static inline void
igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
		dev_priv->gt_irq_mask_reg |= mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
		(void) I915_READ(GTIMR);
	}
}

/* For display hotplug interrupt */
void
igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
		(void) I915_READ(DEIMR);
	}
}

static inline void
igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
		(void) I915_READ(DEIMR);
	}
}

void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

static inline void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

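/*
 * Note on the mask helpers above: in IMR, DEIMR and GTIMR a set bit *masks*
 * (disables) the corresponding interrupt, so enabling an interrupt clears
 * its bit and disabling sets it. The dummy register read after each write
 * is a posting read that flushes the write out to the hardware before the
 * caller proceeds.
 */
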
static inline u32
i915_pipestat(int pipe)
{
	if (pipe == 0)
		return PIPEASTAT;
	if (pipe == 1)
		return PIPEBSTAT;
	BUG();
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		(void) I915_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		(void) I915_READ(reg);
	}
}

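/*
 * PIPE(A|B)STAT packs interrupt enable bits in the high half of the register
 * and the matching status bits sixteen positions lower; status bits are
 * cleared by writing 1 to them. That is why i915_enable_pipestat() also
 * writes (mask >> 16): it acks any event that was already pending at the
 * moment the source is enabled.
 */
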
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle (struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (IS_IGDNG(dev))
		igdng_enable_display_irq(dev_priv, DE_GSE);
	else
		i915_enable_pipestat(dev_priv, 1,
				     I915_LEGACY_BLC_EVENT_ENABLE);
}

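/*
 * ASLE events (backlight and similar requests posted through the ACPI
 * OpRegion) are delivered as the GSE display interrupt on IGDNG/Ironlake
 * and as the legacy BLC event on pipe B for earlier chips; both paths are
 * routed to the opregion handlers from the interrupt handlers below.
 */
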
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;

	if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
		return 1;

	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, count;

	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %d\n", pipe);
		return 0;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
		       PIPE_FRAME_LOW_SHIFT);
		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

	count = (high1 << 8) | low;

	return count;
}

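/*
 * The frame counter read above is split across two registers:
 * PIPE*FRAMEHIGH holds the upper bits and PIPE*FRAMEPIXEL holds the low
 * eight bits of the frame count alongside the pixel counter, hence the
 * (high1 << 8) | low reassembly. Re-reading the high half until it is
 * stable guards against the low half wrapping between the two non-atomic
 * reads.
 */
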
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %d\n", pipe);
		return 0;
	}

	return I915_READ(reg);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	if (mode_config->num_connector) {
		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_output *intel_output = to_intel_output(connector);

			if (intel_output->hot_plug)
				(*intel_output->hot_plug) (intel_output);
		}
	}
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_sysfs_hotplug_event(dev);
}

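/*
 * This runs from a workqueue rather than the interrupt handler because the
 * per-output ->hot_plug() callbacks may need to probe the display, which
 * can sleep; the interrupt handler below only queues hotplug_work.
 */
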
irqreturn_t igdng_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir;
	u32 new_de_iir, new_gt_iir;
	struct drm_i915_master_private *master_priv;

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);

	for (;;) {
		if (de_iir == 0 && gt_iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(DEIIR, de_iir);
		new_de_iir = I915_READ(DEIIR);
		I915_WRITE(GTIIR, gt_iir);
		new_gt_iir = I915_READ(GTIIR);

		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		if (gt_iir & GT_USER_INTERRUPT) {
			u32 seqno = i915_get_gem_seqno(dev);
			dev_priv->mm.irq_gem_seqno = seqno;
			trace_i915_gem_request_complete(dev, seqno);
			DRM_WAKEUP(&dev_priv->irq_queue);
		}

		if (de_iir & DE_GSE)
			ironlake_opregion_gse_intr(dev);

		de_iir = new_de_iir;
		gt_iir = new_gt_iir;
	}

	return ret;
}

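/*
 * Writing an IIR value back acks exactly the bits that were read; the
 * follow-up read picks up any event that arrived in the meantime, and the
 * loop above keeps iterating until both the display-engine and GT IIRs
 * read back as zero.
 */
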
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	DRM_DEBUG_DRIVER("generating error event\n");
	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		if (IS_I965G(dev)) {
			DRM_DEBUG_DRIVER("resetting chip\n");
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
			if (!i965_reset(dev, GDRST_RENDER)) {
				atomic_set(&dev_priv->mm.wedged, 0);
				kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
			}
		} else {
			DRM_DEBUG_DRIVER("reboot required\n");
		}
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error)
		goto out;

	error = kmalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		goto out;
	}

	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->pipeastat = I915_READ(PIPEASTAT);
	error->pipebstat = I915_READ(PIPEBSTAT);
	error->instpm = I915_READ(INSTPM);
	if (!IS_I965G(dev)) {
		error->ipeir = I915_READ(IPEIR);
		error->ipehr = I915_READ(IPEHR);
		error->instdone = I915_READ(INSTDONE);
		error->acthd = I915_READ(ACTHD);
	} else {
		error->ipeir = I915_READ(IPEIR_I965);
		error->ipehr = I915_READ(IPEHR_I965);
		error->instdone = I915_READ(INSTDONE_I965);
		error->instps = I915_READ(INSTPS);
		error->instdone1 = I915_READ(INSTDONE1);
		error->acthd = I915_READ(ACTHD_I965);
	}

	do_gettimeofday(&error->time);

	dev_priv->first_error = error;

out:
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
static void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);
	u32 pipea_stats = I915_READ(PIPEASTAT);
	u32 pipeb_stats = I915_READ(PIPEBSTAT);

	i915_capture_error_state(dev);

	printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
	       eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			(void)I915_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			(void)I915_READ(PGTBL_ER);
		}
	}

	if (IS_I9XX(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			(void)I915_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		printk(KERN_ERR "memory refresh error\n");
		printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
		       pipea_stats);
		printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
		       pipeb_stats);
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		printk(KERN_ERR "instruction error\n");
		printk(KERN_ERR "  INSTPM: 0x%08x\n",
		       I915_READ(INSTPM));
		if (!IS_I965G(dev)) {
			u32 ipeir = I915_READ(IPEIR);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			(void)I915_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			(void)I915_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	(void)I915_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}

	if (wedged) {
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		printk("i915: Waking up sleeping processes\n");
		DRM_WAKEUP(&dev_priv->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}

irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u32 iir, new_iir;
	u32 pipea_stats, pipeb_stats;
	u32 vblank_status;
	u32 vblank_enable;
	int vblank = 0;
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	if (IS_IGDNG(dev))
		return igdng_irq_handler(dev);

	iir = I915_READ(IIR);

	if (IS_I965G(dev)) {
		vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
		vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
	} else {
		vblank_status = I915_VBLANK_INTERRUPT_STATUS;
		vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
	}

	for (;;) {
		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
		pipea_stats = I915_READ(PIPEASTAT);
		pipeb_stats = I915_READ(PIPEBSTAT);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		/*
		 * Clear the PIPE(A|B)STAT regs before the IIR
		 */
		if (pipea_stats & 0x8000ffff) {
			if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe a underrun\n");
			I915_WRITE(PIPEASTAT, pipea_stats);
			irq_received = 1;
		}

		if (pipeb_stats & 0x8000ffff) {
			if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe b underrun\n");
			I915_WRITE(PIPEBSTAT, pipeb_stats);
			irq_received = 1;
		}
		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);

			/* Handle EOS interrupts */
			if (IS_IGD(dev) &&
			    (hotplug_status & CRT_EOS_INT_STATUS)) {
				u32 temp;

				DRM_DEBUG_DRIVER("EOS interrupt occurs\n");
				/* status is already cleared */
				temp = I915_READ(ADPA);
				temp &= ~ADPA_DAC_ENABLE;
				I915_WRITE(ADPA, temp);

				temp = I915_READ(PORT_HOTPLUG_EN);
				temp &= ~CRT_EOS_INT_EN;
				I915_WRITE(PORT_HOTPLUG_EN, temp);

				temp = I915_READ(PORT_HOTPLUG_STAT);
				if (temp & CRT_EOS_INT_STATUS)
					I915_WRITE(PORT_HOTPLUG_STAT,
						   CRT_EOS_INT_STATUS);
			}
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		if (iir & I915_USER_INTERRUPT) {
			u32 seqno = i915_get_gem_seqno(dev);
			dev_priv->mm.irq_gem_seqno = seqno;
			trace_i915_gem_request_complete(dev, seqno);
			DRM_WAKEUP(&dev_priv->irq_queue);
			dev_priv->hangcheck_count = 0;
			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
		}

		if (pipea_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 0);
		}

		if (pipeb_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 1);
		}

		if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
		    (iir & I915_ASLE_INTERRUPT))
			opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

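/*
 * The 0x8000ffff mask used when checking PIPE(A|B)STAT above (and when
 * clearing it in the install/uninstall paths below) selects the status
 * bits: the low sixteen bits plus the FIFO underrun status in bit 31.
 * The remaining high bits are the per-event interrupt enables.
 */
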
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	RING_LOCALS;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}

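/*
 * The four dwords emitted above form the breadcrumb: MI_STORE_DWORD_INDEX
 * stores dev_priv->counter into the hardware status page at
 * I915_BREADCRUMB_INDEX, and MI_USER_INTERRUPT then raises an interrupt.
 * i915_wait_irq() below compares READ_BREADCRUMB() against the value
 * returned here to decide when the corresponding commands have completed.
 */
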
void i915_user_irq_get(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
		if (IS_IGDNG(dev))
			igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void i915_user_irq_put(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
		if (IS_IGDNG(dev))
			igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (dev_priv->trace_irq_seqno == 0)
		i915_user_irq_get(dev);

	dev_priv->trace_irq_seqno = seqno;
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
			 READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	i915_user_irq_get(dev);
	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
	i915_user_irq_put(dev);

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (!dev_priv || !dev_priv->ring.virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
	u32 pipeconf;

	pipeconf = I915_READ(pipeconf_reg);
	if (!(pipeconf & PIPEACONF_ENABLE))
		return -EINVAL;

	if (IS_IGDNG(dev))
		return 0;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (IS_I965G(dev))
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (IS_IGDNG(dev))
		return;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void i915_enable_interrupt (struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_IGDNG(dev))
		opregion_enable_asle(dev);
	dev_priv->irq_enabled = 1;
}


/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
	drm_i915_private_t *dev_priv = dev->dev_private;
	return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd;

	if (!IS_I965G(dev))
		acthd = I915_READ(ACTHD);
	else
		acthd = I915_READ(ACTHD_I965);

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (list_empty(&dev_priv->mm.request_list) ||
	    i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
		dev_priv->hangcheck_count = 0;
		return;
	}

	if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);
		return;
	}

	/* Reset the timer in case the chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);

	if (acthd != dev_priv->last_acthd)
		dev_priv->hangcheck_count = 0;
	else
		dev_priv->hangcheck_count++;

	dev_priv->last_acthd = acthd;
}

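/*
 * Hangcheck in a nutshell: ACTHD is the ring's active head pointer. The
 * user interrupt path re-arms the timer and zeroes hangcheck_count whenever
 * a breadcrumb completes; if the timer fires while requests are still
 * outstanding and ACTHD has not moved since the previous expiry, the GPU is
 * declared hung and i915_handle_error() is asked to wedge (and, on i965,
 * possibly reset) it.
 */
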
/* drm_dma.h hooks
 */
static void igdng_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	(void) I915_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	(void) I915_READ(GTIER);
}

static int igdng_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts that are always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE /*| DE_PCH_EVENT */;
	u32 render_mask = GT_USER_INTERRUPT;

	dev_priv->irq_mask_reg = ~display_mask;
	dev_priv->de_irq_enable_reg = display_mask;

	/* should always be able to generate interrupts */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
	(void) I915_READ(DEIER);

	/* the user interrupt should be enabled, but masked initially */
	dev_priv->gt_irq_mask_reg = 0xffffffff;
	dev_priv->gt_irq_enable_reg = render_mask;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
	(void) I915_READ(GTIER);

	return 0;
}

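/*
 * On IGDNG/Ironlake the interrupt logic is split in two: display engine
 * events go through DEIIR/DEIMR/DEIER and render (GT) events through
 * GTIIR/GTIMR/GTIER, with DE_MASTER_IRQ_CONTROL in DEIER gating the whole
 * block. The postinstall above therefore unmasks only the display events
 * it always wants (GSE for the OpRegion) and leaves the GT user interrupt
 * enabled in GTIER but masked in GTIMR until i915_user_irq_get() is called.
 */
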
void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);

	if (IS_IGDNG(dev)) {
		igdng_irq_preinstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xeffe);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	(void) I915_READ(IER);
}

int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
	u32 error_mask;

	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	if (IS_IGDNG(dev))
		return igdng_irq_postinstall(dev);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		/* Leave other bits alone */
		hotplug_en |= HOTPLUG_EN_MASK;
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

		dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
			TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
			SDVOB_HOTPLUG_INT_STATUS;
		if (IS_G4X(dev)) {
			dev_priv->hotplug_supported_mask |=
				HDMIB_HOTPLUG_INT_STATUS |
				HDMIC_HOTPLUG_INT_STATUS |
				HDMID_HOTPLUG_INT_STATUS;
		}
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT);
	}

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Disable pipe interrupt enables, clear pending pipe status */
	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	/* Clear pending interrupt status */
	I915_WRITE(IIR, I915_READ(IIR));

	I915_WRITE(IER, enable_mask);
	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	(void) I915_READ(IER);

	opregion_enable_asle(dev);

	return 0;
}

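/*
 * Division of labour in the setup above: IER selects which sources may
 * raise interrupts at all, IMR masks delivery of individual sources, and
 * EMR masks which error conditions feed the error interrupt. The "FIX"
 * interrupts stay permanently unmasked and are gated through PIPESTAT,
 * while the "VAR" ones (the user interrupt) are toggled at runtime via
 * i915_user_irq_get()/i915_user_irq_put().
 */
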
static void igdng_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));
}

void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	if (IS_IGDNG(dev)) {
		igdng_irq_uninstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}