/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define MAX_NOPID ((u32)~0)
/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX \
	(I915_ASLE_INTERRUPT | \
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS | \
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE | \
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)
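
/*
 * Note on the register layout used throughout this file: each interrupt
 * domain has a matching trio of registers -- an enable register
 * (IER/DEIER/GTIER/SDEIER), a mask register (IMR/DEIMR/GTIMR/SDEIMR) and an
 * identity register (IIR/DEIIR/GTIIR/SDEIIR).  Handlers acknowledge events
 * by writing the set bits back to the IIR; the helpers below only ever
 * adjust the mask registers at runtime, matching the FIX/VAR split above.
 */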

static void
ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
		dev_priv->gt_irq_mask_reg &= ~mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
		(void) I915_READ(GTIMR);
	}
}

static void
ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
		dev_priv->gt_irq_mask_reg |= mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
		(void) I915_READ(GTIMR);
	}
}

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
		(void) I915_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
		(void) I915_READ(DEIMR);
	}
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

static inline u32
i915_pipestat(int pipe)
{
	if (pipe == 0)
		return PIPEASTAT;
	if (pipe == 1)
		return PIPEBSTAT;
	BUG();
}

static void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		(void) I915_READ(reg);
	}
}

static void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		(void) I915_READ(reg);
	}
}
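
/*
 * The PIPESTAT registers pack an enable bit in the high half and the
 * corresponding status bit sixteen positions lower, which is why the enable
 * path above ors in (mask >> 16): enabling an event also clears any status
 * bit that was already pending for it.
 */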

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle (struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     I915_LEGACY_BLC_EVENT_ENABLE);
		if (IS_I965G(dev))
			i915_enable_pipestat(dev_priv, 0,
					     I915_LEGACY_BLC_EVENT_ENABLE);
	}
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;

	if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
		return 1;

	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, count;

	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %d\n", pipe);
		return 0;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
		       PIPE_FRAME_LOW_SHIFT);
		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

	count = (high1 << 8) | low;

	return count;
}

u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %d\n", pipe);
		return 0;
	}

	return I915_READ(reg);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_encoder *encoder;

	if (mode_config->num_encoder) {
		list_for_each_entry(encoder, &mode_config->encoder_list, head) {
			struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);

			if (intel_encoder->hot_plug)
				(*intel_encoder->hot_plug) (intel_encoder);
		}
	}
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static void i915_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u16 rgvswctl;
	u8 new_delay = dev_priv->cur_delay;

	I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->min_delay)
			new_delay = dev_priv->min_delay;
	}

	DRM_DEBUG("rps change requested: %d -> %d\n",
		  dev_priv->cur_delay, new_delay);

	rgvswctl = I915_READ(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_ERROR("gpu busy, RCS change rejected\n");
		return; /* still busy with another command */
	}

	/* Program the new state */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE(MEMSWCTL, rgvswctl);
	POSTING_READ(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);

	dev_priv->cur_delay = new_delay;

	DRM_DEBUG("rps changed\n");
}
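
/*
 * Note that the delay values used above are inverted with respect to
 * frequency: a numerically smaller delay means a higher performance state,
 * so max_delay is the lowest value cur_delay may reach and min_delay the
 * highest.  That is why the busy path decrements and the idle path
 * increments cur_delay.
 */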

irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir;
	struct drm_i915_master_private *master_priv;

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	(void)I915_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
		goto done;

	ret = IRQ_HANDLED;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}

	if (gt_iir & GT_PIPE_NOTIFY) {
		u32 seqno = i915_get_gem_seqno(dev);
		dev_priv->mm.irq_gem_seqno = seqno;
		trace_i915_gem_request_complete(dev, seqno);
		DRM_WAKEUP(&dev_priv->irq_queue);
		dev_priv->hangcheck_count = 0;
		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
	}

	if (de_iir & DE_GSE)
		ironlake_opregion_gse_intr(dev);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if ((de_iir & DE_PCH_EVENT) &&
	    (pch_iir & SDE_HOTPLUG_MASK)) {
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}

	if (de_iir & DE_PCU_EVENT) {
		I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
		i915_handle_rps_change(dev);
	}

	/* Clear the PCH hotplug event before clearing the CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);

done:
	I915_WRITE(DEIER, de_ier);
	(void)I915_READ(DEIER);

	return ret;
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * occurred.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	DRM_DEBUG_DRIVER("generating error event\n");
	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		if (IS_I965G(dev)) {
			DRM_DEBUG_DRIVER("resetting chip\n");
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
			if (!i965_reset(dev, GDRST_RENDER)) {
				atomic_set(&dev_priv->mm.wedged, 0);
				kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
			}
		} else {
			DRM_DEBUG_DRIVER("reboot required\n");
		}
	}
}

static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
			 struct drm_gem_object *src)
{
	struct drm_i915_error_object *dst;
	struct drm_i915_gem_object *src_priv;
	int page, page_count;

	if (src == NULL)
		return NULL;

	src_priv = to_intel_bo(src);
	if (src_priv->pages == NULL)
		return NULL;

	page_count = src->size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	for (page = 0; page < page_count; page++) {
		void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		unsigned long flags;

		if (d == NULL)
			goto unwind;
		local_irq_save(flags);
		s = kmap_atomic(src_priv->pages[page], KM_IRQ0);
		memcpy(d, s, PAGE_SIZE);
		kunmap_atomic(s, KM_IRQ0);
		local_irq_restore(flags);
		dst->pages[page] = d;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src_priv->gtt_offset;

	return dst;

unwind:
	while (page--)
		kfree(dst->pages[page]);
	kfree(dst);
	return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void
i915_error_state_free(struct drm_device *dev,
		      struct drm_i915_error_state *error)
{
	i915_error_object_free(error->batchbuffer[0]);
	i915_error_object_free(error->batchbuffer[1]);
	i915_error_object_free(error->ringbuffer);
	kfree(error->active_bo);
	kfree(error);
}

static u32
i915_get_bbaddr(struct drm_device *dev, u32 *ring)
{
	u32 cmd;

	if (IS_I830(dev) || IS_845G(dev))
		cmd = MI_BATCH_BUFFER;
	else if (IS_I965G(dev))
		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
		       MI_BATCH_NON_SECURE_I965);
	else
		cmd = (MI_BATCH_BUFFER_START | (2 << 6));

	return ring[0] == cmd ? ring[1] : 0;
}
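
/*
 * i915_get_bbaddr() relies on a batch dispatch being a two-dword command:
 * the first dword is the MI_BATCH_BUFFER(_START) opcode (whose encoding
 * varies by generation, hence the checks above) and the second is the
 * graphics address of the batch.  If ring[0] is not such an opcode the
 * function returns 0, which callers treat as "no batch found here".
 */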

static u32
i915_ringbuffer_last_batch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 head, bbaddr;
	u32 *ring;

	/* Locate the current position in the ringbuffer and walk back
	 * to find the most recently dispatched batch buffer.
	 */
	bbaddr = 0;
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring = (u32 *)(dev_priv->ring.virtual_start + head);

	while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
		bbaddr = i915_get_bbaddr(dev, ring);
		if (bbaddr)
			break;
	}

	if (bbaddr == 0) {
		ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
		while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
			bbaddr = i915_get_bbaddr(dev, ring);
			if (bbaddr)
				break;
		}
	}

	return bbaddr;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	struct drm_i915_error_state *error;
	struct drm_gem_object *batchbuffer[2];
	unsigned long flags;
	u32 bbaddr;
	int count;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	error = kmalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	error->seqno = i915_get_gem_seqno(dev);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->pipeastat = I915_READ(PIPEASTAT);
	error->pipebstat = I915_READ(PIPEBSTAT);
	error->instpm = I915_READ(INSTPM);
	if (!IS_I965G(dev)) {
		error->ipeir = I915_READ(IPEIR);
		error->ipehr = I915_READ(IPEHR);
		error->instdone = I915_READ(INSTDONE);
		error->acthd = I915_READ(ACTHD);
		error->bbaddr = 0;
	} else {
		error->ipeir = I915_READ(IPEIR_I965);
		error->ipehr = I915_READ(IPEHR_I965);
		error->instdone = I915_READ(INSTDONE_I965);
		error->instps = I915_READ(INSTPS);
		error->instdone1 = I915_READ(INSTDONE1);
		error->acthd = I915_READ(ACTHD_I965);
		error->bbaddr = I915_READ64(BB_ADDR);
	}

	bbaddr = i915_ringbuffer_last_batch(dev);

	/* Grab the current batchbuffer, most likely to have crashed. */
	batchbuffer[0] = NULL;
	batchbuffer[1] = NULL;
	count = 0;
	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
		struct drm_gem_object *obj = &obj_priv->base;

		if (batchbuffer[0] == NULL &&
		    bbaddr >= obj_priv->gtt_offset &&
		    bbaddr < obj_priv->gtt_offset + obj->size)
			batchbuffer[0] = obj;

		if (batchbuffer[1] == NULL &&
		    error->acthd >= obj_priv->gtt_offset &&
		    error->acthd < obj_priv->gtt_offset + obj->size &&
		    batchbuffer[0] != obj)
			batchbuffer[1] = obj;

		count++;
	}

	/* We need to copy these to an anonymous buffer as the simplest
	 * method to avoid being overwritten by userspace.
	 */
	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
	error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);

	/* Record the ringbuffer */
	error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);

	/* Record buffers on the active list. */
	error->active_bo = NULL;
	error->active_bo_count = 0;

	if (count)
		error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
					   GFP_ATOMIC);

	if (error->active_bo) {
		int i = 0;
		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
			struct drm_gem_object *obj = &obj_priv->base;

			error->active_bo[i].size = obj->size;
			error->active_bo[i].name = obj->name;
			error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
			error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
			error->active_bo[i].read_domains = obj->read_domains;
			error->active_bo[i].write_domain = obj->write_domain;
			error->active_bo[i].fence_reg = obj_priv->fence_reg;
			error->active_bo[i].pinned = 0;
			if (obj_priv->pin_count > 0)
				error->active_bo[i].pinned = 1;
			if (obj_priv->user_pin_count > 0)
				error->active_bo[i].pinned = -1;
			error->active_bo[i].tiling = obj_priv->tiling_mode;
			error->active_bo[i].dirty = obj_priv->dirty;
			error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;

			if (++i == count)
				break;
		}
		error->active_bo_count = i;
	}

	do_gettimeofday(&error->time);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(dev, error);
}
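
/*
 * Everything in i915_capture_error_state() runs in atomic context (it is
 * reached from the error interrupt and from the hangcheck timer), which is
 * why all allocations above use GFP_ATOMIC and why the batch and ring
 * contents are copied through kmap_atomic() rather than referenced in place.
 */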

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock(&dev_priv->error_lock);

	if (error)
		i915_error_state_free(dev, error);
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
static void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);
	u32 pipea_stats = I915_READ(PIPEASTAT);
	u32 pipeb_stats = I915_READ(PIPEBSTAT);

	i915_capture_error_state(dev);

	printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
	       eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			(void)I915_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			(void)I915_READ(PGTBL_ER);
		}
	} else {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			(void)I915_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		printk(KERN_ERR "memory refresh error\n");
		printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
		       pipea_stats);
		printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
		       pipeb_stats);
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		printk(KERN_ERR "instruction error\n");
		printk(KERN_ERR "  INSTPM: 0x%08x\n",
		       I915_READ(INSTPM));
		if (!IS_I965G(dev)) {
			u32 ipeir = I915_READ(IPEIR);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			(void)I915_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			(void)I915_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	(void)I915_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}

	if (wedged) {
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		DRM_WAKEUP(&dev_priv->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}

irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u32 iir, new_iir;
	u32 pipea_stats, pipeb_stats;
	u32 vblank_status;
	u32 vblank_enable;
	int vblank = 0;
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	if (HAS_PCH_SPLIT(dev))
		return ironlake_irq_handler(dev);

	iir = I915_READ(IIR);

	if (IS_I965G(dev)) {
		vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
		vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
	} else {
		vblank_status = I915_VBLANK_INTERRUPT_STATUS;
		vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
	}

	for (;;) {
		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
		pipea_stats = I915_READ(PIPEASTAT);
		pipeb_stats = I915_READ(PIPEBSTAT);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		/*
		 * Clear the PIPE(A|B)STAT regs before the IIR
		 */
		if (pipea_stats & 0x8000ffff) {
			if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe a underrun\n");
			I915_WRITE(PIPEASTAT, pipea_stats);
			irq_received = 1;
		}

		if (pipeb_stats & 0x8000ffff) {
			if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe b underrun\n");
			I915_WRITE(PIPEBSTAT, pipeb_stats);
			irq_received = 1;
		}
		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		if (iir & I915_USER_INTERRUPT) {
			u32 seqno = i915_get_gem_seqno(dev);
			dev_priv->mm.irq_gem_seqno = seqno;
			trace_i915_gem_request_complete(dev, seqno);
			DRM_WAKEUP(&dev_priv->irq_queue);
			dev_priv->hangcheck_count = 0;
			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
		}

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 0);

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 1);

		if (pipea_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 0);
			intel_finish_page_flip(dev, 0);
		}

		if (pipeb_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 1);
			intel_finish_page_flip(dev, 1);
		}

		if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
		    (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
		    (iir & I915_ASLE_INTERRUPT))
			opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}
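
/*
 * The breadcrumb emitted above works as follows: MI_STORE_DWORD_INDEX has
 * the GPU write dev_priv->counter into the hardware status page at
 * I915_BREADCRUMB_INDEX once the preceding commands have executed, and
 * MI_USER_INTERRUPT then raises the user interrupt.  READ_BREADCRUMB()
 * simply reads that status-page slot back, which is what i915_wait_irq()
 * below polls against the value returned here.
 */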

void i915_user_irq_get(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void i915_user_irq_put(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (dev_priv->trace_irq_seqno == 0)
		i915_user_irq_get(dev);

	dev_priv->trace_irq_seqno = seqno;
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
			 READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	i915_user_irq_get(dev);
	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
	i915_user_irq_put(dev);

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (!dev_priv || !dev_priv->ring.virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
	u32 pipeconf;

	pipeconf = I915_READ(pipeconf_reg);
	if (!(pipeconf & PIPEACONF_ENABLE))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
					    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	else if (IS_I965G(dev))
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
	return 0;
}
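
/*
 * Pre-965 parts only report the legacy vblank event, while 965+ can report
 * the more precise "start of vblank" event; the enable path above therefore
 * picks PIPE_VBLANK_INTERRUPT_ENABLE or PIPE_START_VBLANK_INTERRUPT_ENABLE
 * accordingly, and the disable path below simply clears both.
 */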

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (HAS_PCH_SPLIT(dev))
		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
					     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	else
		i915_disable_pipestat(dev_priv, pipe,
				      PIPE_VBLANK_INTERRUPT_ENABLE |
				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void i915_enable_interrupt (struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_PCH_SPLIT(dev))
		opregion_enable_asle(dev);
	dev_priv->irq_enabled = 1;
}

/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

struct drm_i915_gem_request *
i915_get_tail_request(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd;

	/* No reset support on this chip yet. */
	if (IS_GEN6(dev))
		return;

	if (!IS_I965G(dev))
		acthd = I915_READ(ACTHD);
	else
		acthd = I915_READ(ACTHD_I965);

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (list_empty(&dev_priv->mm.request_list) ||
	    i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
		dev_priv->hangcheck_count = 0;
		return;
	}

	if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);
		return;
	}

	/* Reset the timer in case the chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);

	if (acthd != dev_priv->last_acthd)
		dev_priv->hangcheck_count = 0;
	else
		dev_priv->hangcheck_count++;

	dev_priv->last_acthd = acthd;
}
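
/*
 * The hangcheck heuristic above is deliberately simple: ACTHD is the GPU's
 * current execution address, so if there is outstanding work and ACTHD has
 * not moved between two timer expirations the engine is assumed to be hung
 * and i915_handle_error() is asked to wedge and reset it.  A single stalled
 * sample is tolerated because hangcheck_count must be nonzero as well.
 */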

static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	(void) I915_READ(DEIER);

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	(void) I915_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	(void) I915_READ(SDEIER);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* Interrupts that are always left enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_mask = GT_PIPE_NOTIFY;
	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;

	dev_priv->irq_mask_reg = ~display_mask;
	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;

	/* These should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
	(void) I915_READ(DEIER);

	/* The user interrupt is enabled here but starts out masked */
	dev_priv->gt_irq_mask_reg = 0xffffffff;
	dev_priv->gt_irq_enable_reg = render_mask;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
	(void) I915_READ(GTIER);

	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
	dev_priv->pch_irq_enable_reg = hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
	(void) I915_READ(SDEIER);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}

void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);

	if (HAS_PCH_SPLIT(dev)) {
		ironlake_irq_preinstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xeffe);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	(void) I915_READ(IER);
}

/*
 * Must be called after intel_modeset_init or hotplug interrupts won't be
 * enabled correctly.
 */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
	u32 error_mask;

	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	if (HAS_PCH_SPLIT(dev))
		return ironlake_irq_postinstall(dev);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		/* Note HDMI and DP share bits */
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
			hotplug_en |= CRT_HOTPLUG_INT_EN;
		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT);
	}

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Disable pipe interrupt enables, clear pending pipe status */
	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	/* Clear pending interrupt status */
	I915_WRITE(IIR, I915_READ(IIR));

	I915_WRITE(IER, enable_mask);
	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	(void) I915_READ(IER);

	opregion_enable_asle(dev);

	return 0;
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));
}

void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	if (HAS_PCH_SPLIT(dev)) {
		ironlake_irq_uninstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}