/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);

	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	if (INTEL_INFO(dev)->gen >= 4)
		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
					     0xf0;

	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
	return 0;
}
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (ring->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ring->size = init->ring_size;

		ring->map.offset = init->ring_start;
		ring->map.size = init->ring_size;
		ring->map.type = 0;
		ring->map.flags = 0;
		ring->map.mtrr = 0;

		drm_core_ioremap_wc(&ring->map, dev);

		if (ring->map.handle == NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("can not ioremap virtual address for"
				  " ring buffer\n");
			return -ENOMEM;
		}
	}

	ring->virtual_start = ring->map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x04:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}
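/*
 * Copy a userspace command buffer into the low-priority ring, validating each
 * instruction with validate_cmd() first so a malicious buffer cannot smuggle
 * privileged commands past the MI_BATCH_NON_SECURE-style checks.
 */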
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
static int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t * cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			 __func__,
			 dev_priv->current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}
static int i915_quiescent(struct drm_device *dev)
{
	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

	i915_kernel_lost_context(dev);
	return intel_wait_ring_buffer(ring, ring->size - 8);
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_HAS_BSD:
		value = HAS_BSD(dev);
		break;
	case I915_PARAM_HAS_BLT:
		value = HAS_BLT(dev);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}
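/*
 * Illustrative userspace side (not part of this file): a client would
 * typically query one of the parameters above through libdrm, e.g.
 *
 *	int fd = ...;		/. open DRM fd ./
 *	int has_blt = 0;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_HAS_BLT,
 *				   .value = &has_blt };
 *	drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
 *
 * after which has_blt holds the value written by the switch above.
 */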
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
					param->param);
		return -EINVAL;
	}

	return 0;
}
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr = dev_priv->hws_map.handle;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}
static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}
#define PTE_ADDRESS_MASK		0xfffff000
#define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
#define PTE_MAPPING_TYPE_DCACHE		(1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED		(3 << 1)
#define PTE_MAPPING_TYPE_MASK		(3 << 1)
#define PTE_VALID			(1 << 0)
/**
 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
 *                       a physical one
 * @dev: drm device
 * @offset: address to translate
 *
 * Some chip functions require allocations from stolen space and need the
 * physical address of the memory in question.
 */
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pdev = dev_priv->bridge_dev;
	u32 base;

#if 0
	/* On the machines I have tested the Graphics Base of Stolen Memory
	 * is unreliable, so compute the base by subtracting the stolen memory
	 * from the Top of Low Usable DRAM which is where the BIOS places
	 * the graphics stolen memory.
	 */
	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
		/* top 32bits are reserved = 0 */
		pci_read_config_dword(pdev, 0xA4, &base);
	} else {
		/* XXX presume 8xx is the same as i915 */
		pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
	}
#else
	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
		u16 val;
		pci_read_config_word(pdev, 0xb0, &val);
		base = val >> 4 << 20;
	} else {
		u8 val;
		pci_read_config_byte(pdev, 0x9c, &val);
		base = val >> 3 << 27;
	}
	base -= dev_priv->mm.gtt->stolen_size;
#endif

	return base + offset;
}
static void i915_warn_stolen(struct drm_device *dev)
{
	DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
	DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}
static void i915_setup_compression(struct drm_device *dev, int size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
	unsigned long cfb_base;
	unsigned long ll_base = 0;

	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
	if (compressed_fb)
		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
	if (!compressed_fb)
		goto err;

	cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
	if (!cfb_base)
		goto err_fb;

	if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
		compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
						    4096, 4096, 0);
		if (compressed_llb)
			compressed_llb = drm_mm_get_block(compressed_llb,
							  4096, 4096);
		if (!compressed_llb)
			goto err_fb;

		ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
		if (!ll_base)
			goto err_llb;
	}

	dev_priv->cfb_size = size;

	intel_disable_fbc(dev);
	dev_priv->compressed_fb = compressed_fb;
	if (HAS_PCH_SPLIT(dev))
		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
	else if (IS_GM45(dev)) {
		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
	} else {
		I915_WRITE(FBC_CFB_BASE, cfb_base);
		I915_WRITE(FBC_LL_BASE, ll_base);
		dev_priv->compressed_llb = compressed_llb;
	}

	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
		      cfb_base, ll_base, size >> 20);
	return;

err_llb:
	drm_mm_put_block(compressed_llb);
err_fb:
	drm_mm_put_block(compressed_fb);
err:
	dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
	i915_warn_stolen(dev);
}
static void i915_cleanup_compression(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_mm_put_block(dev_priv->compressed_fb);
	if (dev_priv->compressed_llb)
		drm_mm_put_block(dev_priv->compressed_llb);
}
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "i915: switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		printk(KERN_ERR "i915: switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long prealloc_size, gtt_size, mappable_size;
	int ret = 0;

	prealloc_size = dev_priv->mm.gtt->stolen_size;
	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);

	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently
	 * prefetches past the end of the object, and we've seen multiple
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
	 * at the last page of the aperture.  One page should be enough to
	 * keep any prefetching inside of the aperture.
	 */
	i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_init_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;

	/* Try to set up FBC with a reasonable compressed buffer size */
	if (I915_HAS_FBC(dev) && i915_powersave) {
		int cfb_size;

		/* Leave 1M for line length buffer & misc. */

		/* Try to get a 32M buffer... */
		if (prealloc_size > (36*1024*1024))
			cfb_size = 32*1024*1024;
		else /* fall back to 7/8 of the stolen space */
			cfb_size = prealloc_size * 7 / 8;
		i915_setup_compression(dev, cfb_size);
	}

	/* Allow hardware batchbuffers unless told otherwise. */
	dev_priv->allow_batchbuffer = 1;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret)
		goto cleanup_ringbuffer;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev,
					     i915_switcheroo_set_state,
					     NULL,
					     i915_switcheroo_can_switch);
	if (ret)
		goto cleanup_vga_client;

	/* IIR "flip pending" bit means done if this bit is set */
	if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
		dev_priv->flip_pending_is_done = true;

	intel_modeset_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_irq;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return 0;

cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->c_m = 1;
	} else {
		dev_priv->c_m = 2;
	}
}
{
1419 { 1, 1333, 301, 28664 },
1420 { 1, 1066, 294, 24460 },
1421 { 1, 800, 294, 25192 },
1422 { 0, 1333, 276, 27605 },
1423 { 0, 1066, 276, 27605 },
1424 { 0, 800, 231, 23784 },
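/*
 * Estimate chipset power draw for IPS: accumulate the DMI/DDR/CSI energy
 * counters since the last sample and convert them with the (m, c) pair from
 * the cparams table that matches the current memory and FSB configuration.
 */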
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	diff1 = now - dev_priv->last_time1;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->last_count1) {
		diff = ~0UL - dev_priv->last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->c_m &&
		    cparams[i].t == dev_priv->r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->last_count1 = total_count;
	dev_priv->last_time1 = now;

	return ret;
}
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	static const struct v_table {
		u16 vd; /* in .1 mil */
		u16 vm; /* in .1 mil */
	} v_table[] = {
		/* per-PXVID voltage table entries omitted here */
	};

	if (dev_priv->info->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct timespec now, diff1;
	u64 diff;
	unsigned long diffms;
	u32 count;

	getrawmonotonic(&now);
	diff1 = timespec_sub(now, dev_priv->last_time2);

	/* Don't divide by 0 */
	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->last_count2) {
		diff = ~0UL - dev_priv->last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->last_count2;
	}

	dev_priv->last_count2 = count;
	dev_priv->last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->gfx_power = diff;
}
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	i915_update_gfx_val(dev_priv);

	return dev_priv->gfx_power + state2;
}
/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static DEFINE_SPINLOCK(mchdev_lock);
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = i915_chipset_val(dev_priv);
	graphics_val = i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->max_delay > dev_priv->fmax)
		dev_priv->max_delay--;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->max_delay < dev_priv->min_delay)
		dev_priv->max_delay++;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
);
1789 * i915_gpu_busy - indicate GPU business to IPS
1791 * Tell the IPS driver whether or not the GPU is busy.
1793 bool i915_gpu_busy(void)
1795 struct drm_i915_private
*dev_priv
;
1798 spin_lock(&mchdev_lock
);
1801 dev_priv
= i915_mch_dev
;
1803 ret
= dev_priv
->busy
;
1806 spin_unlock(&mchdev_lock
);
1810 EXPORT_SYMBOL_GPL(i915_gpu_busy
);
/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->max_delay = dev_priv->fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
		ret = false;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	int ret = 0, mmio_bar;
	uint32_t agp_size;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = (struct intel_device_info *) flags;

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	dev_priv->mm.gtt = intel_gtt_get();
	if (!dev_priv->mm.gtt) {
		DRM_ERROR("Failed to initialize GTT\n");
		ret = -ENODEV;
		goto out_iomapfree;
	}

	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	dev_priv->mm.gtt_mapping =
		io_mapping_create_wc(dev->agp->base, agp_size);
	if (dev_priv->mm.gtt_mapping == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}

	/* Set up a WC MTRR for non-PAT systems.  This is more common than
	 * one would think, because the kernel disables PAT on first
	 * generation Core chips because WC PAT gets overridden by a UC
	 * MTRR if present.  Even if a UC MTRR isn't present.
	 */
	dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
					 agp_size,
					 MTRR_TYPE_WRCOMB, 1);
	if (dev_priv->mm.gtt_mtrr < 0) {
		DRM_INFO("MTRR allocation failed.  Graphics "
			 "performance may suffer.\n");
	}

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time: max_active = 1 and NON_REENTRANT.
	 */
	dev_priv->wq = alloc_workqueue("i915",
				       WQ_UNBOUND | WQ_NON_REENTRANT,
				       1);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_iomapfree;
	}

	/* enable GEM by default */
	dev_priv->has_gem = 1;

	if (dev_priv->has_gem == 0 &&
	    drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
		ret = -ENODEV;
		goto out_workqueue_free;
	}

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	/* Make sure the bios did its job and set up vital registers */
	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret)
			goto out_gem_unload;
	}

	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->error_lock);
	dev_priv->trace_irq_seqno = 0;

	ret = drm_vblank_init(dev, I915_NUM_PIPE);
	if (ret)
		goto out_gem_unload;

	/* Start out suspended */
	dev_priv->mm.suspended = 1;

	intel_detect_pch(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_gem_unload;
		}
	}

	/* Must be done after probing outputs */
	intel_opregion_init(dev);
	acpi_video_register();

	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
		    (unsigned long) dev);

	spin_lock(&mchdev_lock);
	i915_mch_dev = dev_priv;
	dev_priv->mchdev_lock = &mchdev_lock;
	spin_unlock(&mchdev_lock);

	ips_ping_for_i915_load();

	return 0;

out_gem_unload:
	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
out_workqueue_free:
	destroy_workqueue(dev_priv->wq);
out_iomapfree:
	io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	kfree(dev_priv);
	return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	spin_lock(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock(&mchdev_lock);

	if (dev_priv->mm.inactive_shrinker.shrink)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gpu_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->child_dev && dev_priv->child_dev_num) {
			kfree(dev_priv->child_dev);
			dev_priv->child_dev = NULL;
			dev_priv->child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	/* Free error state after interrupts are fully disabled. */
	del_timer_sync(&dev_priv->hangcheck_timer);
	cancel_work_sync(&dev_priv->error_work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		i915_gem_free_all_phys_object(dev);

		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);
		if (I915_HAS_FBC(dev) && i915_powersave)
			i915_cleanup_compression(dev);
		drm_mm_takedown(&dev_priv->mm.stolen);

		intel_cleanup_overlay(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->wq);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev->dev_private);

	return 0;
}
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");
	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	return 0;
}
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_helper_restore();
		vga_switcheroo_process_delayed_switch();
		return;
	}

	i915_gem_lastclose(dev);

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	i915_gem_release(dev, file_priv);
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}