/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

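/* Bits used by this driver in the 16-bit interrupt identity/enable
 * registers: the user interrupt and the pipe A/B vblank (vsync) events.
 */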
#define USER_INT_FLAG (1<<1)
#define VSYNC_PIPEB_FLAG (1<<5)
#define VSYNC_PIPEA_FLAG (1<<7)

#define MAX_NOPID ((u32)~0)

/**
 * Emit blits for scheduled buffer swaps.
 *
 * This function will be called with the HW lock held.
 */
static void i915_vblank_tasklet(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	struct list_head *list, *tmp, hits, *hit;
	int nhits, nrects, slice[2], upper[2], lower[2], i;
	unsigned counter[2] = { atomic_read(&dev->vbl_received),
				atomic_read(&dev->vbl_received2) };
	struct drm_drawable_info *drw;
	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
	u32 cpp = dev_priv->cpp;
	u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
				XY_SRC_COPY_BLT_WRITE_ALPHA |
				XY_SRC_COPY_BLT_WRITE_RGB)
			     : XY_SRC_COPY_BLT_CMD;
	u32 src_pitch = sarea_priv->pitch * cpp;
	u32 dst_pitch = sarea_priv->pitch * cpp;
	u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
	RING_LOCALS;

	if (sarea_priv->front_tiled) {
		cmd |= XY_SRC_COPY_BLT_DST_TILED;
		dst_pitch >>= 2;
	}
	if (sarea_priv->back_tiled) {
		cmd |= XY_SRC_COPY_BLT_SRC_TILED;
		src_pitch >>= 2;
	}

	DRM_DEBUG("\n");

	INIT_LIST_HEAD(&hits);

	nhits = nrects = 0;

	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

	/* Find buffer swaps scheduled for this vertical blank */
	list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
		drm_i915_vbl_swap_t *vbl_swap =
			list_entry(list, drm_i915_vbl_swap_t, head);

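		/* Unsigned, wraparound-safe comparison: a huge difference
		 * means the swap's target vblank has not been reached yet,
		 * so leave it queued for a later interrupt.
		 */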
		if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
			continue;

		list_del(list);
		dev_priv->swaps_pending--;

		spin_unlock(&dev_priv->swaps_lock);
		spin_lock(&dev->drw_lock);

		drw = drm_get_drawable_info(dev, vbl_swap->drw_id);

		if (!drw) {
			spin_unlock(&dev->drw_lock);
			drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
			spin_lock(&dev_priv->swaps_lock);
			continue;
		}

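		/* Keep the hits list sorted by the top (y1) of each
		 * drawable's first clip rect so the blits below are emitted
		 * in scanout order.
		 */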
		list_for_each(hit, &hits) {
			drm_i915_vbl_swap_t *swap_cmp =
				list_entry(hit, drm_i915_vbl_swap_t, head);
			struct drm_drawable_info *drw_cmp =
				drm_get_drawable_info(dev, swap_cmp->drw_id);

			if (drw_cmp &&
			    drw_cmp->rects[0].y1 > drw->rects[0].y1) {
				list_add_tail(list, hit);
				break;
			}
		}

		spin_unlock(&dev->drw_lock);

		/* List of hits was empty, or we reached the end of it */
		if (hit == &hits)
			list_add_tail(list, hits.prev);

		nhits++;

		spin_lock(&dev_priv->swaps_lock);
	}

	if (nhits == 0) {
		spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
		return;
	}

	spin_unlock(&dev_priv->swaps_lock);

	i915_kernel_lost_context(dev);

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING(0);
		OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(0);
		OUT_RING(0);
		OUT_RING(sarea_priv->width | sarea_priv->height << 16);
		OUT_RING(sarea_priv->width | sarea_priv->height << 16);
		OUT_RING(0);

		ADVANCE_LP_RING();
	}

	sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;

	upper[0] = upper[1] = 0;
	slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
	slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
	lower[0] = sarea_priv->pipeA_y + slice[0];
	lower[1] = sarea_priv->pipeB_y + slice[1];

	spin_lock(&dev->drw_lock);

	/* Emit blits for buffer swaps, partitioning both outputs into as many
	 * slices as there are buffer swaps scheduled in order to avoid tearing
	 * (based on the assumption that a single buffer swap would always
	 * complete before scanout starts).
	 */
	for (i = 0; i++ < nhits;
	     upper[0] = lower[0], lower[0] += slice[0],
	     upper[1] = lower[1], lower[1] += slice[1]) {
		if (i == nhits)
			lower[0] = lower[1] = sarea_priv->height;

		list_for_each(hit, &hits) {
			drm_i915_vbl_swap_t *swap_hit =
				list_entry(hit, drm_i915_vbl_swap_t, head);
			struct drm_clip_rect *rect;
			int num_rects, pipe;
			unsigned short top, bottom;

			drw = drm_get_drawable_info(dev, swap_hit->drw_id);

			if (!drw)
				continue;

			rect = drw->rects;
			pipe = swap_hit->pipe;
			top = upper[pipe];
			bottom = lower[pipe];

			for (num_rects = drw->num_rects; num_rects--; rect++) {
				int y1 = max(rect->y1, top);
				int y2 = min(rect->y2, bottom);

				if (y1 >= y2)
					continue;

				BEGIN_LP_RING(8);

				OUT_RING(cmd);
				OUT_RING(ropcpp | dst_pitch);
				OUT_RING((y1 << 16) | rect->x1);
				OUT_RING((y2 << 16) | rect->x2);
				OUT_RING(sarea_priv->front_offset);
				OUT_RING((y1 << 16) | rect->x1);
				OUT_RING(src_pitch);
				OUT_RING(sarea_priv->back_offset);

				ADVANCE_LP_RING();
			}
		}
	}

	spin_unlock_irqrestore(&dev->drw_lock, irqflags);

	list_for_each_safe(hit, tmp, &hits) {
		drm_i915_vbl_swap_t *swap_hit =
			list_entry(hit, drm_i915_vbl_swap_t, head);

		list_del(hit);

		drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
	}
}

irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 temp;
	u32 pipea_stats, pipeb_stats;

	pipea_stats = I915_READ(I915REG_PIPEASTAT);
	pipeb_stats = I915_READ(I915REG_PIPEBSTAT);

	temp = I915_READ16(I915REG_INT_IDENTITY_R);

	temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);

	DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);

	if (temp == 0)
		return IRQ_NONE;

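	/* Writing the handled bits back to the identity register
	 * acknowledges them; the read that follows flushes the write before
	 * we go on to process the events.
	 */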
	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
	(void) I915_READ16(I915REG_INT_IDENTITY_R);
	DRM_READMEMORYBARRIER();

	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

	if (temp & USER_INT_FLAG)
		DRM_WAKEUP(&dev_priv->irq_queue);

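	/* Count vblank events: when both pipes are enabled for vblank, each
	 * pipe gets its own counter; otherwise whichever pipe fired is
	 * accumulated in the primary counter.
	 */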
	if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
		int vblank_pipe = dev_priv->vblank_pipe;

		if ((vblank_pipe &
		     (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
		    == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
			if (temp & VSYNC_PIPEA_FLAG)
				atomic_inc(&dev->vbl_received);
			if (temp & VSYNC_PIPEB_FLAG)
				atomic_inc(&dev->vbl_received2);
		} else if (((temp & VSYNC_PIPEA_FLAG) &&
			    (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
			   ((temp & VSYNC_PIPEB_FLAG) &&
			    (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
			atomic_inc(&dev->vbl_received);

		DRM_WAKEUP(&dev->vbl_queue);
		drm_vbl_send_signals(dev);

		if (dev_priv->swaps_pending > 0)
			drm_locked_tasklet(dev, i915_vblank_tasklet);
		I915_WRITE(I915REG_PIPEASTAT,
			   pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
			   I915_VBLANK_CLEAR);
		I915_WRITE(I915REG_PIPEBSTAT,
			   pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
			   I915_VBLANK_CLEAR);
	}

	return IRQ_HANDLED;
}

static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i915_kernel_lost_context(dev);

	DRM_DEBUG("\n");

	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;

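	/* Store the new counter value into the hardware status page so
	 * READ_BREADCRUMB() can pick it up, then raise a user interrupt to
	 * wake anyone sleeping in i915_wait_irq().
	 */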
	BEGIN_LP_RING(6);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(GFX_OP_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = 0;

	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr)
		return 0;

	dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return ret;
}

static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
				      atomic_t *counter)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned int cur_vblank;
	int ret = 0;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

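	/* Wait until the counter has reached the requested sequence; the
	 * unsigned subtraction keeps the test valid across counter
	 * wraparound.
	 */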
	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
		    (((cur_vblank = atomic_read(counter))
		      - *sequence) <= (1<<23)));

	*sequence = cur_vblank;

	return ret;
}


int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
{
	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
}

int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
{
	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	result = i915_emit_irq(dev);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

static void i915_enable_interrupt (struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 flag;

	flag = 0;
	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
		flag |= VSYNC_PIPEA_FLAG;
	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
		flag |= VSYNC_PIPEB_FLAG;

	I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
}

/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
		DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
		return -EINVAL;
	}

	dev_priv->vblank_pipe = pipe->pipe;

	i915_enable_interrupt (dev);

	return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;
	u16 flag;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	flag = I915_READ(I915REG_INT_ENABLE_R);
	pipe->pipe = 0;
	if (flag & VSYNC_PIPEA_FLAG)
		pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
	if (flag & VSYNC_PIPEB_FLAG)
		pipe->pipe |= DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_swap_t *swap = data;
	drm_i915_vbl_swap_t *vbl_swap;
	unsigned int pipe, seqtype, curseq;
	unsigned long irqflags;
	struct list_head *list;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __func__);
		return -EINVAL;
	}

	if (dev_priv->sarea_priv->rotation) {
		DRM_DEBUG("Rotation not supported\n");
		return -EINVAL;
	}

	if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
			      _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
		DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
		return -EINVAL;
	}

	pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;

	seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);

	if (!(dev_priv->vblank_pipe & (1 << pipe))) {
		DRM_ERROR("Invalid pipe %d\n", pipe);
		return -EINVAL;
	}

	spin_lock_irqsave(&dev->drw_lock, irqflags);

	if (!drm_get_drawable_info(dev, swap->drawable)) {
		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
		DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
		return -EINVAL;
	}

	spin_unlock_irqrestore(&dev->drw_lock, irqflags);

	curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);

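	/* Turn a relative request into an absolute target sequence.  If that
	 * target has already passed (wraparound-safe check), either retarget
	 * to the next vblank or report the miss to the caller.
	 */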
	if (seqtype == _DRM_VBLANK_RELATIVE)
		swap->sequence += curseq;

	if ((curseq - swap->sequence) <= (1<<23)) {
		if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
			swap->sequence = curseq + 1;
		} else {
			DRM_DEBUG("Missed target sequence\n");
			return -EINVAL;
		}
	}

	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

	list_for_each(list, &dev_priv->vbl_swaps.head) {
		vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);

		if (vbl_swap->drw_id == swap->drawable &&
		    vbl_swap->pipe == pipe &&
		    vbl_swap->sequence == swap->sequence) {
			spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
			DRM_DEBUG("Already scheduled\n");
			return 0;
		}
	}

	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

	if (dev_priv->swaps_pending >= 100) {
		DRM_DEBUG("Too many swaps queued\n");
		return -EBUSY;
	}

	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);

	if (!vbl_swap) {
		DRM_ERROR("Failed to allocate memory to queue swap\n");
		return -ENOMEM;
	}

	DRM_DEBUG("\n");

	vbl_swap->drw_id = swap->drawable;
	vbl_swap->pipe = pipe;
	vbl_swap->sequence = swap->sequence;

	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

	list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending++;

	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

	return 0;
}

/* drm_dma.h hooks
*/
void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE16(I915REG_HWSTAM, 0xfffe);
	I915_WRITE16(I915REG_INT_MASK_R, 0x0);
	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
}

void i915_driver_irq_postinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	spin_lock_init(&dev_priv->swaps_lock);
	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending = 0;

	if (!dev_priv->vblank_pipe)
		dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
	i915_enable_interrupt(dev);
	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
}

void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 temp;

	if (!dev_priv)
		return;

	I915_WRITE16(I915REG_HWSTAM, 0xffff);
	I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);

	temp = I915_READ16(I915REG_INT_IDENTITY_R);
	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
}