/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        /* Program Hardware Status Page */
        dev_priv->status_page_dmah =
                drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);

        if (!dev_priv->status_page_dmah) {
                DRM_ERROR("Can not allocate hardware status page\n");
                return -ENOMEM;
        }
        dev_priv->render_ring.status_page.page_addr
                = dev_priv->status_page_dmah->vaddr;
        dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

        memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);

        if (INTEL_INFO(dev)->gen >= 4)
                dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
                                             0xf0;

        I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
        DRM_DEBUG_DRIVER("Enabled hardware status page\n");
        return 0;
}
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (dev_priv->status_page_dmah) {
                drm_pci_free(dev, dev_priv->status_page_dmah);
                dev_priv->status_page_dmah = NULL;
        }

        if (dev_priv->render_ring.status_page.gfx_addr) {
                dev_priv->render_ring.status_page.gfx_addr = 0;
                drm_core_ioremapfree(&dev_priv->hws_map, dev);
        }

        /* Need to rewrite hardware status page */
        I915_WRITE(HWS_PGA, 0x1ffff000);
}
void i915_kernel_lost_context(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
        struct intel_ring_buffer *ring = &dev_priv->render_ring;

        /*
         * We should never lose context on the ring with modesetting
         * as we don't expose it to userspace
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
        ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
                ring->space += ring->size;

        if (!dev->primary->master)
                return;

        master_priv = dev->primary->master->driver_priv;
        if (ring->head == ring->tail && master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
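/*
 * Ring-space bookkeeping: the ring is a circular buffer, so the free space
 * between the hardware read pointer (head) and the software write pointer
 * (tail) is head - (tail + 8); when that goes negative the buffer has
 * wrapped and one full ring size is added back.  The extra 8 bytes keep
 * tail from catching up to head exactly, which would look like "empty".
 */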
static int i915_dma_cleanup(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        mutex_lock(&dev->struct_mutex);
        intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
        intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
        intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
        mutex_unlock(&dev->struct_mutex);

        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
                i915_free_hws(dev);

        return 0;
}
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        master_priv->sarea = drm_getsarea(dev);
        if (master_priv->sarea) {
                master_priv->sarea_priv = (drm_i915_sarea_t *)
                        ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
        } else {
                DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
        }

        if (init->ring_size != 0) {
                if (dev_priv->render_ring.gem_object != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }

                dev_priv->render_ring.size = init->ring_size;

                dev_priv->render_ring.map.offset = init->ring_start;
                dev_priv->render_ring.map.size = init->ring_size;
                dev_priv->render_ring.map.type = 0;
                dev_priv->render_ring.map.flags = 0;
                dev_priv->render_ring.map.mtrr = 0;

                drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);

                if (dev_priv->render_ring.map.handle == NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("can not ioremap virtual address for"
                                  " ring buffer\n");
                        return -ENOMEM;
                }
        }

        dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;

        dev_priv->cpp = init->cpp;
        dev_priv->back_offset = init->back_offset;
        dev_priv->front_offset = init->front_offset;
        dev_priv->current_page = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->pf_current_page = 0;

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->allow_batchbuffer = 1;

        return 0;
}
static int i915_dma_resume(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct intel_ring_buffer *ring;

        DRM_DEBUG_DRIVER("%s\n", __func__);

        ring = &dev_priv->render_ring;

        if (ring->map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        /* Program Hardware Status Page */
        if (!ring->status_page.page_addr) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG_DRIVER("hw status page @ %p\n",
                         ring->status_page.page_addr);
        if (ring->status_page.gfx_addr != 0)
                intel_ring_setup_status_page(dev, ring);
        else
                I915_WRITE(HWS_PGA, dev_priv->dma_status_page);

        DRM_DEBUG_DRIVER("Enabled hardware status page\n");

        return 0;
}
static int i915_dma_init(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_init_t *init = data;
        int retcode = 0;

        switch (init->func) {
        case I915_INIT_DMA:
                retcode = i915_initialize(dev, init);
                break;
        case I915_CLEANUP_DMA:
                retcode = i915_dma_cleanup(dev);
                break;
        case I915_RESUME_DMA:
                retcode = i915_dma_resume(dev);
                break;
        default:
                retcode = -EINVAL;
                break;
        }

        return retcode;
}
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
        switch (((cmd >> 29) & 0x7)) {
        case 0x0:
                switch ((cmd >> 23) & 0x3f) {
                case 0x0:
                        return 1;       /* MI_NOOP */
                case 0x4:
                        return 1;       /* MI_FLUSH */
                default:
                        return 0;       /* disallow everything else */
                }
                break;
        case 0x1:
                return 0;       /* reserved */
        case 0x2:
                return (cmd & 0xff) + 2;        /* 2d commands */
        case 0x3:
                if (((cmd >> 24) & 0x1f) <= 0x18)
                        return 1;

                switch ((cmd >> 24) & 0x1f) {
                case 0x1c:
                        return 1;
                case 0x1d:
                        switch ((cmd >> 16) & 0xff) {
                        case 0x3:
                                return (cmd & 0x1f) + 2;
                        case 0x4:
                                return (cmd & 0xf) + 2;
                        default:
                                return (cmd & 0xffff) + 2;
                        }
                case 0x1e:
                        if (cmd & (1 << 23))
                                return (cmd & 0xffff) + 1;
                        else
                                return 1;
                case 0x1f:
                        if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                                return (cmd & 0x1ffff) + 2;
                        else if (cmd & (1 << 17))       /* indirect random */
                                if ((cmd & 0xffff) == 0)
                                        return 0;       /* unknown length, too hard */
                                else
                                        return (((cmd & 0xffff) + 1) / 2) + 1;
                        else
                                return 2;       /* indirect sequential */
                default:
                        return 0;
                }
        default:
                return 0;
        }

        return 0;
}
static int validate_cmd(int cmd)
{
        int ret = do_validate_cmd(cmd);

/*      printk("validate_cmd( %x ): %d\n", cmd, ret); */

        return ret;
}
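/*
 * validate_cmd()/do_validate_cmd() parse the client-supplied command stream
 * one dword header at a time: bits 31:29 select the opcode class and the
 * remaining bits encode the instruction length, so the return value is the
 * number of dwords to skip to reach the next header (0 aborts the buffer).
 * For example, a 2D command (class 0x2) with low byte NN occupies NN + 2
 * dwords including its header.
 */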
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;

        if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
                return -EINVAL;

        BEGIN_LP_RING((dwords+1)&~1);

        for (i = 0; i < dwords;) {
                int cmd, sz;

                cmd = buffer[i];

                if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
                        return -EINVAL;

                OUT_RING(cmd);

                while (++i, --sz) {
                        OUT_RING(buffer[i]);
                }
        }

        if (dwords & 1)
                OUT_RING(0);

        ADVANCE_LP_RING();

        return 0;
}
int
i915_emit_box(struct drm_device *dev,
              struct drm_clip_rect *boxes,
              int i, int DR1, int DR4)
{
        struct drm_clip_rect box = boxes[i];

        if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box.x1, box.y1, box.x2, box.y2);
                return -EINVAL;
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                BEGIN_LP_RING(4);
                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
                OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
                OUT_RING(DR4);
                ADVANCE_LP_RING();
        } else {
                BEGIN_LP_RING(6);
                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(DR1);
                OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
                OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
                OUT_RING(DR4);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }

        return 0;
}
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        dev_priv->counter++;
        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->counter;

        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        ADVANCE_LP_RING();
}
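/*
 * The breadcrumb emitted above is an MI_STORE_DWORD_INDEX that makes the GPU
 * write the current software counter into the hardware status page at
 * I915_BREADCRUMB_INDEX.  The CPU side can then read that slot back (via
 * READ_BREADCRUMB(dev_priv), as the dispatch ioctls below do for
 * sarea_priv->last_dispatch) to learn how far the ring has actually executed.
 */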
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
                                   drm_i915_cmdbuffer_t *cmd,
                                   struct drm_clip_rect *cliprects,
                                   void *cmdbuf)
{
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;

        if (cmd->sz & 0x3) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, cliprects, i,
                                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }

                ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
                if (ret)
                        return ret;
        }

        i915_emit_breadcrumb(dev);
        return 0;
}
static int i915_dispatch_batchbuffer(struct drm_device * dev,
                                     drm_i915_batchbuffer_t * batch,
                                     struct drm_clip_rect *cliprects)
{
        int nbox = batch->num_cliprects;
        int i = 0, count;

        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        int ret = i915_emit_box(dev, cliprects, i,
                                                batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }

                if (!IS_I830(dev) && !IS_845G(dev)) {
                        BEGIN_LP_RING(2);
                        if (INTEL_INFO(dev)->gen >= 4) {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
                                OUT_RING(batch->start);
                        } else {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        }
                        ADVANCE_LP_RING();
                } else {
                        BEGIN_LP_RING(4);
                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
                        ADVANCE_LP_RING();
                }
        }

        if (IS_G4X(dev) || IS_GEN5(dev)) {
                BEGIN_LP_RING(2);
                OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
                OUT_RING(MI_NOOP);
                ADVANCE_LP_RING();
        }

        i915_emit_breadcrumb(dev);
        return 0;
}
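/*
 * Batches dispatched here are started with the MI_BATCH_NON_SECURE bit (or
 * MI_BATCH_NON_SECURE_I965 on gen4+), so the hardware applies its own
 * privilege checks to the user-supplied buffer; the cmdbuffer path above
 * instead copies the commands through validate_cmd() before they reach the
 * ring.
 */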
static int i915_dispatch_flip(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv =
                dev->primary->master->driver_priv;

        if (!master_priv->sarea_priv)
                return -EINVAL;

        DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
                         __func__,
                         dev_priv->current_page,
                         master_priv->sarea_priv->pf_current_page);

        i915_kernel_lost_context(dev);

        BEGIN_LP_RING(2);
        OUT_RING(MI_FLUSH | MI_READ_FLUSH);
        OUT_RING(0);
        ADVANCE_LP_RING();

        BEGIN_LP_RING(6);
        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
        OUT_RING(0);
        if (dev_priv->current_page == 0) {
                OUT_RING(dev_priv->back_offset);
                dev_priv->current_page = 1;
        } else {
                OUT_RING(dev_priv->front_offset);
                dev_priv->current_page = 0;
        }
        OUT_RING(0);
        ADVANCE_LP_RING();

        BEGIN_LP_RING(2);
        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
        OUT_RING(0);
        ADVANCE_LP_RING();

        master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        ADVANCE_LP_RING();

        master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
        return 0;
}
static int i915_quiescent(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        i915_kernel_lost_context(dev);
        return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
                                      dev_priv->render_ring.size - 8);
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        int ret;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        ret = i915_quiescent(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
static int i915_batchbuffer(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            master_priv->sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
        int ret;
        struct drm_clip_rect *cliprects = NULL;

        if (!dev_priv->allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return -EINVAL;
        }

        DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
                         batch->start, batch->used, batch->num_cliprects);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (batch->num_cliprects < 0)
                return -EINVAL;

        if (batch->num_cliprects) {
                cliprects = kcalloc(batch->num_cliprects,
                                    sizeof(struct drm_clip_rect),
                                    GFP_KERNEL);
                if (cliprects == NULL)
                        return -ENOMEM;

                ret = copy_from_user(cliprects, batch->cliprects,
                                     batch->num_cliprects *
                                     sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        ret = -EFAULT;
                        goto fail_free;
                }
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
        mutex_unlock(&dev->struct_mutex);

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
        kfree(cliprects);

        return ret;
}
static int i915_cmdbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            master_priv->sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
        struct drm_clip_rect *cliprects = NULL;
        void *batch_data;
        int ret;

        DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                         cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (cmdbuf->num_cliprects < 0)
                return -EINVAL;

        batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
        if (batch_data == NULL)
                return -ENOMEM;

        ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
        if (ret != 0) {
                ret = -EFAULT;
                goto fail_batch_free;
        }

        if (cmdbuf->num_cliprects) {
                cliprects = kcalloc(cmdbuf->num_cliprects,
                                    sizeof(struct drm_clip_rect), GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
                        goto fail_batch_free;
                }

                ret = copy_from_user(cliprects, cmdbuf->cliprects,
                                     cmdbuf->num_cliprects *
                                     sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        ret = -EFAULT;
                        goto fail_clip_free;
                }
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
                goto fail_clip_free;
        }

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
        kfree(cliprects);
fail_batch_free:
        kfree(batch_data);

        return ret;
}
static int i915_flip_bufs(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        int ret;

        DRM_DEBUG_DRIVER("%s\n", __func__);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_flip(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
                value = dev->pdev->irq ? 1 : 0;
                break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
                value = dev_priv->allow_batchbuffer ? 1 : 0;
                break;
        case I915_PARAM_LAST_DISPATCH:
                value = READ_BREADCRUMB(dev_priv);
                break;
        case I915_PARAM_CHIPSET_ID:
                value = dev->pci_device;
                break;
        case I915_PARAM_HAS_GEM:
                value = dev_priv->has_gem;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_PAGEFLIPPING:
                value = 1;
                break;
        case I915_PARAM_HAS_EXECBUF2:
                /* depends on GEM */
                value = dev_priv->has_gem;
                break;
        case I915_PARAM_HAS_BSD:
                value = HAS_BSD(dev);
                break;
        case I915_PARAM_HAS_BLT:
                value = HAS_BLT(dev);
                break;
        case I915_PARAM_HAS_COHERENT_RINGS:
                value = 1;
                break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
                return -EINVAL;
        }

        if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
                DRM_ERROR("DRM_COPY_TO_USER failed\n");
                return -EFAULT;
        }

        return 0;
}
static int i915_setparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_setparam_t *param = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
                break;
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
                dev_priv->tex_lru_log_granularity = param->value;
                break;
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                dev_priv->allow_batchbuffer = param->value;
                break;
        case I915_SETPARAM_NUM_USED_FENCES:
                if (param->value > dev_priv->num_fence_regs ||
                    param->value < 0)
                        return -EINVAL;
                /* Userspace can use first N regs */
                dev_priv->fence_reg_start = param->value;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown parameter %d\n",
                                 param->param);
                return -EINVAL;
        }

        return 0;
}
static int i915_set_status_page(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;
        struct intel_ring_buffer *ring = &dev_priv->render_ring;

        if (!I915_NEED_GFX_HWS(dev))
                return -EINVAL;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                WARN(1, "tried to set status page when mode setting active\n");
                return 0;
        }

        DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

        ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

        dev_priv->hws_map.offset = dev->agp->base + hws->addr;
        dev_priv->hws_map.size = 4*1024;
        dev_priv->hws_map.type = 0;
        dev_priv->hws_map.flags = 0;
        dev_priv->hws_map.mtrr = 0;

        drm_core_ioremap_wc(&dev_priv->hws_map, dev);
        if (dev_priv->hws_map.handle == NULL) {
                i915_dma_cleanup(dev);
                ring->status_page.gfx_addr = 0;
                DRM_ERROR("can not ioremap virtual address for"
                          " G33 hw status page\n");
                return -ENOMEM;
        }
        ring->status_page.page_addr = dev_priv->hws_map.handle;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);
        I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

        DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
                         ring->status_page.gfx_addr);
        DRM_DEBUG_DRIVER("load hws at %p\n",
                         ring->status_page.page_addr);
        return 0;
}
static int i915_get_bridge_dev(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
        if (!dev_priv->bridge_dev) {
                DRM_ERROR("bridge device not found\n");
                return -1;
        }
        return 0;
}
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp_lo, temp_hi = 0;
        u64 mchbar_addr;
        int ret;

        if (INTEL_INFO(dev)->gen >= 4)
                pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
        pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
        mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

        /* If ACPI doesn't have it, assume we need to allocate it ourselves */
        if (mchbar_addr &&
            pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
                return 0;

        /* Get some space for it */
        dev_priv->mch_res.name = "i915 MCHBAR";
        dev_priv->mch_res.flags = IORESOURCE_MEM;
        ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
                                     &dev_priv->mch_res,
                                     MCHBAR_SIZE, MCHBAR_SIZE,
                                     PCIBIOS_MIN_MEM,
                                     0, pcibios_align_resource,
                                     dev_priv->bridge_dev);
        if (ret) {
                DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
                dev_priv->mch_res.start = 0;
                return ret;
        }

        if (INTEL_INFO(dev)->gen >= 4)
                pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
                                       upper_32_bits(dev_priv->mch_res.start));

        pci_write_config_dword(dev_priv->bridge_dev, reg,
                               lower_32_bits(dev_priv->mch_res.start));

        return 0;
}
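/*
 * MCHBAR is the memory controller hub's register window.  Its base lives in
 * the PCI config space of the host bridge at 0:0.0 (offset 0x44 on i915-class
 * parts, 0x48 on gen4+), and the helper above only claims MCHBAR_SIZE bytes
 * of bus address space for it when neither the BIOS nor ACPI/PnP has already
 * reserved a range.
 */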
/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
        bool enabled;

        dev_priv->mchbar_need_disable = false;

        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                enabled = !!(temp & DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled)
                return;

        if (intel_alloc_mchbar_resource(dev))
                return;

        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
                                       temp | DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
        }
}
static void
intel_teardown_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;

        if (dev_priv->mchbar_need_disable) {
                if (IS_I915G(dev) || IS_I915GM(dev)) {
                        pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                        temp &= ~DEVEN_MCHBAR_EN;
                        pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
                } else {
                        pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                        temp &= ~1;
                        pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
                }
        }

        if (dev_priv->mch_res.start)
                release_resource(&dev_priv->mch_res);
}
#define PTE_ADDRESS_MASK                0xfffff000
#define PTE_ADDRESS_MASK_HIGH           0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED       (0 << 1)
#define PTE_MAPPING_TYPE_DCACHE         (1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED         (3 << 1)
#define PTE_MAPPING_TYPE_MASK           (3 << 1)
#define PTE_VALID                       (1 << 0)
/**
 * i915_gtt_to_phys - take a GTT address and turn it into a physical one
 * @dev: drm device
 * @gtt_addr: address to translate
 *
 * Some chip functions require allocations from stolen space but need the
 * physical address of the memory in question.  We use this routine
 * to get a physical address suitable for register programming from a given
 * GTT address.
 */
static unsigned long i915_gtt_to_phys(struct drm_device *dev,
                                      unsigned long gtt_addr)
{
        unsigned long *gtt;
        unsigned long entry, phys;
        int gtt_bar = IS_GEN2(dev) ? 1 : 0;
        int gtt_offset, gtt_size;

        if (INTEL_INFO(dev)->gen >= 4) {
                if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
                        gtt_offset = 2*1024*1024;
                        gtt_size = 2*1024*1024;
                } else {
                        gtt_offset = 512*1024;
                        gtt_size = 512*1024;
                }
        } else {
                gtt_bar = 3;
                gtt_offset = 0;
                gtt_size = pci_resource_len(dev->pdev, gtt_bar);
        }

        gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
                         gtt_size);
        if (!gtt) {
                DRM_ERROR("ioremap of GTT failed\n");
                return 0;
        }

        entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));

        DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);

        /* Mask out these reserved bits on this hardware. */
        if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
                entry &= ~PTE_ADDRESS_MASK_HIGH;

        /* If it's not a mapping type we know, then bail. */
        if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
            (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
                iounmap(gtt);
                return 0;
        }

        if (!(entry & PTE_VALID)) {
                DRM_ERROR("bad GTT entry in stolen space\n");
                iounmap(gtt);
                return 0;
        }

        iounmap(gtt);

        phys = (entry & PTE_ADDRESS_MASK) |
                ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));

        DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);

        return phys;
}
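/*
 * A GTT PTE on these platforms packs the page address as: bits 31:12 hold
 * the low physical address, while bits 7:4 (PTE_ADDRESS_MASK_HIGH) hold
 * physical address bits 35:32 on chips that support more than 4GiB.  Hence
 * the reconstruction above: phys = (entry & 0xfffff000) |
 * ((u64)(entry & 0xf0) << 28).
 */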
static void i915_warn_stolen(struct drm_device *dev)
{
        DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
        DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}
static void i915_setup_compression(struct drm_device *dev, int size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
        unsigned long cfb_base;
        unsigned long ll_base = 0;

        /* Leave 1M for line length buffer & misc. */
        compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
        if (!compressed_fb) {
                dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
                i915_warn_stolen(dev);
                return;
        }

        compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
        if (!compressed_fb) {
                i915_warn_stolen(dev);
                dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
                return;
        }

        cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
        if (!cfb_base) {
                DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
                drm_mm_put_block(compressed_fb);
                return;
        }

        if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
                compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
                                                    4096, 0);
                if (!compressed_llb) {
                        i915_warn_stolen(dev);
                        return;
                }

                compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
                if (!compressed_llb) {
                        i915_warn_stolen(dev);
                        return;
                }

                ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
                if (!ll_base) {
                        DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
                        drm_mm_put_block(compressed_fb);
                        drm_mm_put_block(compressed_llb);
                        return;
                }
        }

        dev_priv->cfb_size = size;

        intel_disable_fbc(dev);
        dev_priv->compressed_fb = compressed_fb;
        if (IS_IRONLAKE_M(dev))
                I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
        else if (IS_GM45(dev)) {
                I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
        } else {
                I915_WRITE(FBC_CFB_BASE, cfb_base);
                I915_WRITE(FBC_LL_BASE, ll_base);
                dev_priv->compressed_llb = compressed_llb;
        }

        DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
                      ll_base, size >> 20);
}
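/*
 * Framebuffer compression (FBC) needs carve-outs from stolen memory: the
 * compressed framebuffer itself and, on parts other than GM45/Ironlake-M, a
 * small line-length buffer.  The older parts program FBC_CFB_BASE/FBC_LL_BASE
 * with physical addresses, which is why i915_gtt_to_phys() translates the
 * stolen-space offsets returned by the drm_mm allocator; GM45 and Ironlake-M
 * take the stolen offset directly.
 */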
static void i915_cleanup_compression(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        drm_mm_put_block(dev_priv->compressed_fb);
        if (dev_priv->compressed_llb)
                drm_mm_put_block(dev_priv->compressed_llb);
}
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
        struct drm_device *dev = cookie;

        intel_modeset_vga_set_state(dev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
        if (state == VGA_SWITCHEROO_ON) {
                printk(KERN_INFO "i915: switched on\n");
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(dev->pdev, PCI_D0);
                i915_resume(dev);
        } else {
                printk(KERN_ERR "i915: switched off\n");
                i915_suspend(dev, pmm);
        }
}
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        bool can_switch;

        spin_lock(&dev->count_lock);
        can_switch = (dev->open_count == 0);
        spin_unlock(&dev->count_lock);
        return can_switch;
}
static int i915_load_modeset_init(struct drm_device *dev,
                                  unsigned long prealloc_size,
                                  unsigned long agp_size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = 0;

        /* Basic memrange allocator for stolen space (aka mm.vram) */
        drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);

        /* Let GEM Manage from end of prealloc space to end of aperture.
         *
         * However, leave one page at the end still bound to the scratch page.
         * There are a number of places where the hardware apparently
         * prefetches past the end of the object, and we've seen multiple
         * hangs with the GPU head pointer stuck in a batchbuffer bound
         * at the last page of the aperture.  One page should be enough to
         * keep any prefetching inside of the aperture.
         */
        i915_gem_do_init(dev, prealloc_size, agp_size - 4096);

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_init_ringbuffer(dev);
        mutex_unlock(&dev->struct_mutex);
        if (ret)
                return ret;

        /* Try to set up FBC with a reasonable compressed buffer size */
        if (I915_HAS_FBC(dev) && i915_powersave) {
                int cfb_size;

                /* Try to get an 8M buffer... */
                if (prealloc_size > (9*1024*1024))
                        cfb_size = 8*1024*1024;
                else /* fall back to 7/8 of the stolen space */
                        cfb_size = prealloc_size * 7 / 8;
                i915_setup_compression(dev, cfb_size);
        }

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->allow_batchbuffer = 1;

        ret = intel_parse_bios(dev);
        if (ret)
                DRM_INFO("failed to find VBIOS tables\n");

        /* if we have > 1 VGA cards, then disable the radeon VGA resources */
        ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
        if (ret)
                goto cleanup_ringbuffer;

        intel_register_dsm_handler();

        ret = vga_switcheroo_register_client(dev->pdev,
                                             i915_switcheroo_set_state,
                                             i915_switcheroo_can_switch);
        if (ret)
                goto cleanup_vga_client;

        /* IIR "flip pending" bit means done if this bit is set */
        if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
                dev_priv->flip_pending_is_done = true;

        intel_modeset_init(dev);

        ret = drm_irq_install(dev);
        if (ret)
                goto cleanup_vga_switcheroo;

        /* Always safe in the mode setting case. */
        /* FIXME: do pre/post-mode set stuff in core KMS code */
        dev->vblank_disable_allowed = 1;

        ret = intel_fbdev_init(dev);
        if (ret)
                goto cleanup_irq;

        drm_kms_helper_poll_init(dev);

        /* We're off and running w/KMS */
        dev_priv->mm.suspended = 0;

        return 0;

cleanup_irq:
        drm_irq_uninstall(dev);
cleanup_vga_switcheroo:
        vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
        vga_client_register(dev->pdev, NULL, NULL, NULL);
cleanup_ringbuffer:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
        struct drm_i915_master_private *master_priv;

        master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
        if (!master_priv)
                return -ENOMEM;

        master->driver_priv = master_priv;
        return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
        struct drm_i915_master_private *master_priv = master->driver_priv;

        if (!master_priv)
                return;

        kfree(master_priv);

        master->driver_priv = NULL;
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 tmp;

        tmp = I915_READ(CLKCFG);

        switch (tmp & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_533:
                dev_priv->fsb_freq = 533; /* 133*4 */
                break;
        case CLKCFG_FSB_800:
                dev_priv->fsb_freq = 800; /* 200*4 */
                break;
        case CLKCFG_FSB_667:
                dev_priv->fsb_freq = 667; /* 167*4 */
                break;
        case CLKCFG_FSB_400:
                dev_priv->fsb_freq = 400; /* 100*4 */
                break;
        }

        switch (tmp & CLKCFG_MEM_MASK) {
        case CLKCFG_MEM_533:
                dev_priv->mem_freq = 533;
                break;
        case CLKCFG_MEM_667:
                dev_priv->mem_freq = 667;
                break;
        case CLKCFG_MEM_800:
                dev_priv->mem_freq = 800;
                break;
        }

        /* detect pineview DDR3 setting */
        tmp = I915_READ(CSHRDDR3CTL);
        dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 ddrpll, csipll;

        ddrpll = I915_READ16(DDRMPLL1);
        csipll = I915_READ16(CSIPLL0);

        switch (ddrpll & 0xff) {
        case 0xc:
                dev_priv->mem_freq = 800;
                break;
        case 0x10:
                dev_priv->mem_freq = 1066;
                break;
        case 0x14:
                dev_priv->mem_freq = 1333;
                break;
        case 0x18:
                dev_priv->mem_freq = 1600;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
                                 ddrpll & 0xff);
                dev_priv->mem_freq = 0;
                break;
        }

        dev_priv->r_t = dev_priv->mem_freq;

        switch (csipll & 0x3ff) {
        case 0x00c:
                dev_priv->fsb_freq = 3200;
                break;
        case 0x00e:
                dev_priv->fsb_freq = 3733;
                break;
        case 0x010:
                dev_priv->fsb_freq = 4266;
                break;
        case 0x012:
                dev_priv->fsb_freq = 4800;
                break;
        case 0x014:
                dev_priv->fsb_freq = 5333;
                break;
        case 0x016:
                dev_priv->fsb_freq = 5866;
                break;
        case 0x018:
                dev_priv->fsb_freq = 6400;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
                                 csipll & 0x3ff);
                dev_priv->fsb_freq = 0;
                break;
        }

        if (dev_priv->fsb_freq == 3200) {
                dev_priv->c_m = 0;
        } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
                dev_priv->c_m = 1;
        } else {
                dev_priv->c_m = 2;
        }
}
struct v_table {
        u8 vid;
        unsigned long vd; /* in .1 mil */
        unsigned long vm; /* in .1 mil */
        u8 pvid;
};

static struct v_table v_table[] = {
        { 0, 16125, 15000, 0x7f, },
        { 1, 16000, 14875, 0x7e, },
        { 2, 15875, 14750, 0x7d, },
        { 3, 15750, 14625, 0x7c, },
        { 4, 15625, 14500, 0x7b, },
        { 5, 15500, 14375, 0x7a, },
        { 6, 15375, 14250, 0x79, },
        { 7, 15250, 14125, 0x78, },
        { 8, 15125, 14000, 0x77, },
        { 9, 15000, 13875, 0x76, },
        { 10, 14875, 13750, 0x75, },
        { 11, 14750, 13625, 0x74, },
        { 12, 14625, 13500, 0x73, },
        { 13, 14500, 13375, 0x72, },
        { 14, 14375, 13250, 0x71, },
        { 15, 14250, 13125, 0x70, },
        { 16, 14125, 13000, 0x6f, },
        { 17, 14000, 12875, 0x6e, },
        { 18, 13875, 12750, 0x6d, },
        { 19, 13750, 12625, 0x6c, },
        { 20, 13625, 12500, 0x6b, },
        { 21, 13500, 12375, 0x6a, },
        { 22, 13375, 12250, 0x69, },
        { 23, 13250, 12125, 0x68, },
        { 24, 13125, 12000, 0x67, },
        { 25, 13000, 11875, 0x66, },
        { 26, 12875, 11750, 0x65, },
        { 27, 12750, 11625, 0x64, },
        { 28, 12625, 11500, 0x63, },
        { 29, 12500, 11375, 0x62, },
        { 30, 12375, 11250, 0x61, },
        { 31, 12250, 11125, 0x60, },
        { 32, 12125, 11000, 0x5f, },
        { 33, 12000, 10875, 0x5e, },
        { 34, 11875, 10750, 0x5d, },
        { 35, 11750, 10625, 0x5c, },
        { 36, 11625, 10500, 0x5b, },
        { 37, 11500, 10375, 0x5a, },
        { 38, 11375, 10250, 0x59, },
        { 39, 11250, 10125, 0x58, },
        { 40, 11125, 10000, 0x57, },
        { 41, 11000, 9875, 0x56, },
        { 42, 10875, 9750, 0x55, },
        { 43, 10750, 9625, 0x54, },
        { 44, 10625, 9500, 0x53, },
        { 45, 10500, 9375, 0x52, },
        { 46, 10375, 9250, 0x51, },
        { 47, 10250, 9125, 0x50, },
        { 48, 10125, 9000, 0x4f, },
        { 49, 10000, 8875, 0x4e, },
        { 50, 9875, 8750, 0x4d, },
        { 51, 9750, 8625, 0x4c, },
        { 52, 9625, 8500, 0x4b, },
        { 53, 9500, 8375, 0x4a, },
        { 54, 9375, 8250, 0x49, },
        { 55, 9250, 8125, 0x48, },
        { 56, 9125, 8000, 0x47, },
        { 57, 9000, 7875, 0x46, },
        { 58, 8875, 7750, 0x45, },
        { 59, 8750, 7625, 0x44, },
        { 60, 8625, 7500, 0x43, },
        { 61, 8500, 7375, 0x42, },
        { 62, 8375, 7250, 0x41, },
        { 63, 8250, 7125, 0x40, },
        { 64, 8125, 7000, 0x3f, },
        { 65, 8000, 6875, 0x3e, },
        { 66, 7875, 6750, 0x3d, },
        { 67, 7750, 6625, 0x3c, },
        { 68, 7625, 6500, 0x3b, },
        { 69, 7500, 6375, 0x3a, },
        { 70, 7375, 6250, 0x39, },
        { 71, 7250, 6125, 0x38, },
        { 72, 7125, 6000, 0x37, },
        { 73, 7000, 5875, 0x36, },
        { 74, 6875, 5750, 0x35, },
        { 75, 6750, 5625, 0x34, },
        { 76, 6625, 5500, 0x33, },
        { 77, 6500, 5375, 0x32, },
        { 78, 6375, 5250, 0x31, },
        { 79, 6250, 5125, 0x30, },
        { 80, 6125, 5000, 0x2f, },
        { 81, 6000, 4875, 0x2e, },
        { 82, 5875, 4750, 0x2d, },
        { 83, 5750, 4625, 0x2c, },
        { 84, 5625, 4500, 0x2b, },
        { 85, 5500, 4375, 0x2a, },
        { 86, 5375, 4250, 0x29, },
        { 87, 5250, 4125, 0x28, },
        { 88, 5125, 4000, 0x27, },
        { 89, 5000, 3875, 0x26, },
        { 90, 4875, 3750, 0x25, },
        { 91, 4750, 3625, 0x24, },
        { 92, 4625, 3500, 0x23, },
        { 93, 4500, 3375, 0x22, },
        { 94, 4375, 3250, 0x21, },
        { 95, 4250, 3125, 0x20, },
        { 96, 4125, 3000, 0x1f, },
        { 97, 4125, 3000, 0x1e, },
        { 98, 4125, 3000, 0x1d, },
        { 99, 4125, 3000, 0x1c, },
        { 100, 4125, 3000, 0x1b, },
        { 101, 4125, 3000, 0x1a, },
        { 102, 4125, 3000, 0x19, },
        { 103, 4125, 3000, 0x18, },
        { 104, 4125, 3000, 0x17, },
        { 105, 4125, 3000, 0x16, },
        { 106, 4125, 3000, 0x15, },
        { 107, 4125, 3000, 0x14, },
        { 108, 4125, 3000, 0x13, },
        { 109, 4125, 3000, 0x12, },
        { 110, 4125, 3000, 0x11, },
        { 111, 4125, 3000, 0x10, },
        { 112, 4125, 3000, 0x0f, },
        { 113, 4125, 3000, 0x0e, },
        { 114, 4125, 3000, 0x0d, },
        { 115, 4125, 3000, 0x0c, },
        { 116, 4125, 3000, 0x0b, },
        { 117, 4125, 3000, 0x0a, },
        { 118, 4125, 3000, 0x09, },
        { 119, 4125, 3000, 0x08, },
        { 120, 1125, 0, 0x07, },
        { 121, 1000, 0, 0x06, },
        { 122, 875, 0, 0x05, },
        { 123, 750, 0, 0x04, },
        { 124, 625, 0, 0x03, },
        { 125, 500, 0, 0x02, },
        { 126, 375, 0, 0x01, },
        { 127, 0, 0, 0x00, },
};
struct cparams {
        u16 i;
        u16 t;
        u16 m;
        u16 c;
};

static struct cparams cparams[] = {
        { 1, 1333, 301, 28664 },
        { 1, 1066, 294, 24460 },
        { 1, 800, 294, 25192 },
        { 0, 1333, 276, 27605 },
        { 0, 1066, 276, 27605 },
        { 0, 800, 231, 23784 },
};
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
        u64 total_count, diff, ret;
        u32 count1, count2, count3, m = 0, c = 0;
        unsigned long now = jiffies_to_msecs(jiffies), diff1;
        int i;

        diff1 = now - dev_priv->last_time1;

        count1 = I915_READ(DMIEC);
        count2 = I915_READ(DDREC);
        count3 = I915_READ(CSIEC);

        total_count = count1 + count2 + count3;

        /* FIXME: handle per-counter overflow */
        if (total_count < dev_priv->last_count1) {
                diff = ~0UL - dev_priv->last_count1;
                diff += total_count;
        } else {
                diff = total_count - dev_priv->last_count1;
        }

        for (i = 0; i < ARRAY_SIZE(cparams); i++) {
                if (cparams[i].i == dev_priv->c_m &&
                    cparams[i].t == dev_priv->r_t) {
                        m = cparams[i].m;
                        c = cparams[i].c;
                        break;
                }
        }

        diff = div_u64(diff, diff1);
        ret = ((m * diff) + c);
        ret = div_u64(ret, 10);

        dev_priv->last_count1 = total_count;
        dev_priv->last_time1 = now;

        return ret;
}
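/*
 * i915_chipset_val() turns the raw DMIEC/DDREC/CSIEC energy counters into a
 * power estimate: counts accumulated since the previous sample are divided by
 * the elapsed milliseconds and scaled by the (m, c) coefficients picked from
 * cparams[] for the current memory/FSB configuration, i.e. roughly
 * power = (m * counts_per_ms + c) / 10.
 */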
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
        unsigned long m, x, b;
        u32 tsfs;

        tsfs = I915_READ(TSFS);

        m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
        x = I915_READ8(TR1);

        b = tsfs & TSFS_INTR_MASK;

        return ((m * x) / 127) - b;
}
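/*
 * i915_mch_val() reads the thermal sensor: TSFS supplies a slope (m) and an
 * intercept (b), TR1 supplies the raw reading (x), and the reported value is
 * the linear fit (m * x) / 127 - b.
 */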
static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
        unsigned long val = 0;
        int i;

        for (i = 0; i < ARRAY_SIZE(v_table); i++) {
                if (v_table[i].pvid == pxvid) {
                        if (IS_MOBILE(dev_priv->dev))
                                val = v_table[i].vm;
                        else
                                val = v_table[i].vd;
                }
        }

        return val;
}
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
        struct timespec now, diff1;
        u64 diff;
        unsigned long diffms;
        u32 count;

        getrawmonotonic(&now);
        diff1 = timespec_sub(now, dev_priv->last_time2);

        /* Don't divide by 0 */
        diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
        if (!diffms)
                return;

        count = I915_READ(GFXEC);

        if (count < dev_priv->last_count2) {
                diff = ~0UL - dev_priv->last_count2;
                diff += count;
        } else {
                diff = count - dev_priv->last_count2;
        }

        dev_priv->last_count2 = count;
        dev_priv->last_time2 = now;

        /* More magic constants... */
        diff = diff * 1181;
        diff = div_u64(diff, diffms * 10);
        dev_priv->gfx_power = diff;
}
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
        unsigned long t, corr, state1, corr2, state2;
        u32 pxvid, ext_v;

        pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
        pxvid = (pxvid >> 24) & 0x7f;
        ext_v = pvid_to_extvid(dev_priv, pxvid);

        state1 = ext_v;

        t = i915_mch_val(dev_priv);

        /* Revel in the empirically derived constants */

        /* Correction factor in 1/100000 units */
        if (t > 80)
                corr = ((t * 2349) + 135940);
        else if (t >= 50)
                corr = ((t * 964) + 29317);
        else /* < 50 */
                corr = ((t * 301) + 1004);

        corr = corr * ((150142 * state1) / 10000 - 78642);
        corr /= 100000;
        corr2 = (corr * dev_priv->corr);

        state2 = (corr2 * state1) / 10000;
        state2 /= 100; /* convert to mW */

        i915_update_gfx_val(dev_priv);

        return dev_priv->gfx_power + state2;
}
/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static DEFINE_SPINLOCK(mchdev_lock);
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
        struct drm_i915_private *dev_priv;
        unsigned long chipset_val, graphics_val, ret = 0;

        spin_lock(&mchdev_lock);
        if (!i915_mch_dev)
                goto out_unlock;
        dev_priv = i915_mch_dev;

        chipset_val = i915_chipset_val(dev_priv);
        graphics_val = i915_gfx_val(dev_priv);

        ret = chipset_val + graphics_val;

out_unlock:
        spin_unlock(&mchdev_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
        struct drm_i915_private *dev_priv;
        bool ret = true;

        spin_lock(&mchdev_lock);
        if (!i915_mch_dev) {
                ret = false;
                goto out_unlock;
        }
        dev_priv = i915_mch_dev;

        if (dev_priv->max_delay > dev_priv->fmax)
                dev_priv->max_delay--;

out_unlock:
        spin_unlock(&mchdev_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
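/*
 * Note the inverted sense of the delay values used by the IPS hooks: a
 * smaller delay means a higher GPU frequency, so "raise" above decrements
 * max_delay (bounded by fmax, the fastest allowed point) and "lower" below
 * increments it (bounded by min_delay).
 */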
/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
        struct drm_i915_private *dev_priv;
        bool ret = true;

        spin_lock(&mchdev_lock);
        if (!i915_mch_dev) {
                ret = false;
                goto out_unlock;
        }
        dev_priv = i915_mch_dev;

        if (dev_priv->max_delay < dev_priv->min_delay)
                dev_priv->max_delay++;

out_unlock:
        spin_unlock(&mchdev_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
        struct drm_i915_private *dev_priv;
        bool ret = false;

        spin_lock(&mchdev_lock);
        if (!i915_mch_dev)
                goto out_unlock;
        dev_priv = i915_mch_dev;

        ret = dev_priv->busy;

out_unlock:
        spin_unlock(&mchdev_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
        struct drm_i915_private *dev_priv;
        bool ret = true;

        spin_lock(&mchdev_lock);
        if (!i915_mch_dev) {
                ret = false;
                goto out_unlock;
        }
        dev_priv = i915_mch_dev;

        dev_priv->max_delay = dev_priv->fstart;

        if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
                ret = false;

out_unlock:
        spin_unlock(&mchdev_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
        void (*link)(void);

        link = symbol_get(ips_link_to_i915_driver);
        if (link) {
                link();
                symbol_put(ips_link_to_i915_driver);
        }
}
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct drm_i915_private *dev_priv;
        resource_size_t base, size;
        int ret = 0, mmio_bar;
        uint32_t agp_size, prealloc_size;

        /* i915 has 4 more counters */
        dev->counters += 4;
        dev->types[6] = _DRM_STAT_IRQ;
        dev->types[7] = _DRM_STAT_PRIMARY;
        dev->types[8] = _DRM_STAT_SECONDARY;
        dev->types[9] = _DRM_STAT_DMA;

        dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
        if (dev_priv == NULL)
                return -ENOMEM;

        dev->dev_private = (void *)dev_priv;
        dev_priv->dev = dev;
        dev_priv->info = (struct intel_device_info *) flags;

        /* Add register map (needed for suspend/resume) */
        mmio_bar = IS_GEN2(dev) ? 1 : 0;
        base = pci_resource_start(dev->pdev, mmio_bar);
        size = pci_resource_len(dev->pdev, mmio_bar);

        if (i915_get_bridge_dev(dev)) {
                ret = -EIO;
                goto free_priv;
        }

        /* overlay on gen2 is broken and can't address above 1G */
        if (IS_GEN2(dev))
                dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

        dev_priv->regs = ioremap(base, size);
        if (!dev_priv->regs) {
                DRM_ERROR("failed to map registers\n");
                ret = -EIO;
                goto put_bridge;
        }

        dev_priv->mm.gtt_mapping =
                io_mapping_create_wc(dev->agp->base,
                                     dev->agp->agp_info.aper_size * 1024*1024);
        if (dev_priv->mm.gtt_mapping == NULL) {
                ret = -EIO;
                goto out_rmmap;
        }

        /* Set up a WC MTRR for non-PAT systems.  This is more common than
         * one would think, because the kernel disables PAT on first
         * generation Core chips because WC PAT gets overridden by a UC
         * MTRR if present.  Even if a UC MTRR isn't present.
         */
        dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
                                         dev->agp->agp_info.aper_size *
                                         1024 * 1024,
                                         MTRR_TYPE_WRCOMB, 1);
        if (dev_priv->mm.gtt_mtrr < 0) {
                DRM_INFO("MTRR allocation failed.  Graphics "
                         "performance may suffer.\n");
        }

        dev_priv->mm.gtt = intel_gtt_get();
        if (!dev_priv->mm.gtt) {
                DRM_ERROR("Failed to initialize GTT\n");
                ret = -ENODEV;
                goto out_iomapfree;
        }

        prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
        agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
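        /*
         * prealloc_size is the stolen memory the BIOS reserved for graphics
         * (managed later as mm.vram for things like FBC), while agp_size is
         * the CPU-mappable aperture; GEM manages everything from the end of
         * the stolen region up to the aperture end.
         */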
        /* The i915 workqueue is primarily used for batched retirement of
         * requests (and thus managing bo) once the task has been completed
         * by the GPU. i915_gem_retire_requests() is called directly when we
         * need high-priority retirement, such as waiting for an explicit
         * bo.
         *
         * It is also used for periodic low-priority events, such as
         * idle-timers and hangcheck.
         *
         * All tasks on the workqueue are expected to acquire the dev mutex
         * so there is no point in running more than one instance of the
         * workqueue at any time: max_active = 1 and NON_REENTRANT.
         */
        dev_priv->wq = alloc_workqueue("i915",
                                       WQ_UNBOUND | WQ_NON_REENTRANT,
                                       1);
        if (dev_priv->wq == NULL) {
                DRM_ERROR("Failed to create our workqueue.\n");
                ret = -ENOMEM;
                goto out_iomapfree;
        }

        /* enable GEM by default */
        dev_priv->has_gem = 1;

        if (prealloc_size > agp_size * 3 / 4) {
                DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
                          "memory stolen.\n",
                          prealloc_size / 1024, agp_size / 1024);
                DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
                          "updating the BIOS to fix).\n");
                dev_priv->has_gem = 0;
        }

        if (dev_priv->has_gem == 0 &&
            drm_core_check_feature(dev, DRIVER_MODESET)) {
                DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
                ret = -ENODEV;
                goto out_iomapfree;
        }

        dev->driver->get_vblank_counter = i915_get_vblank_counter;
        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
        if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = gm45_get_vblank_counter;
        }

        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);
        intel_setup_gmbus(dev);
        intel_opregion_setup(dev);

        /* Make sure the bios did its job and set up vital registers */
        intel_setup_bios(dev);

        i915_gem_load(dev);

        /* Init HWS */
        if (!I915_NEED_GFX_HWS(dev)) {
                ret = i915_init_phys_hws(dev);
                if (ret != 0)
                        goto out_workqueue_free;
        }

        if (IS_PINEVIEW(dev))
                i915_pineview_get_mem_freq(dev);
        else if (IS_GEN5(dev))
                i915_ironlake_get_mem_freq(dev);

        /* On the 945G/GM, the chipset reports the MSI capability on the
         * integrated graphics even though the support isn't actually there
         * according to the published specs.  It doesn't appear to function
         * correctly in testing on 945G.
         * This may be a side effect of MSI having been made available for PEG
         * and the registers being closely associated.
         *
         * According to chipset errata, on the 965GM, MSI interrupts may
         * be lost or delayed, but we use them anyways to avoid
         * stuck interrupts on some machines.
         */
        if (!IS_I945G(dev) && !IS_I945GM(dev))
                pci_enable_msi(dev->pdev);

        spin_lock_init(&dev_priv->user_irq_lock);
        spin_lock_init(&dev_priv->error_lock);
        dev_priv->trace_irq_seqno = 0;

        ret = drm_vblank_init(dev, I915_NUM_PIPE);

        if (ret) {
                (void) i915_driver_unload(dev);
                return ret;
        }

        /* Start out suspended */
        dev_priv->mm.suspended = 1;

        intel_detect_pch(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
                if (ret < 0) {
                        DRM_ERROR("failed to init modeset\n");
                        goto out_workqueue_free;
                }
        }

        /* Must be done after probing outputs */
        intel_opregion_init(dev);
        acpi_video_register();

        setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
                    (unsigned long) dev);

        spin_lock(&mchdev_lock);
        i915_mch_dev = dev_priv;
        dev_priv->mchdev_lock = &mchdev_lock;
        spin_unlock(&mchdev_lock);

        ips_ping_for_i915_load();

        return 0;

out_workqueue_free:
        destroy_workqueue(dev_priv->wq);
out_iomapfree:
        io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
        iounmap(dev_priv->regs);
put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
free_priv:
        kfree(dev_priv);
        return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        spin_lock(&mchdev_lock);
        i915_mch_dev = NULL;
        spin_unlock(&mchdev_lock);

        mutex_lock(&dev->struct_mutex);
        ret = i915_gpu_idle(dev);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
        mutex_unlock(&dev->struct_mutex);

        /* Cancel the retire work handler, which should be idle now. */
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);

        io_mapping_free(dev_priv->mm.gtt_mapping);
        if (dev_priv->mm.gtt_mtrr >= 0) {
                mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
                         dev->agp->agp_info.aper_size * 1024 * 1024);
                dev_priv->mm.gtt_mtrr = -1;
        }

        acpi_video_unregister();

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_fbdev_fini(dev);
                intel_modeset_cleanup(dev);

                /*
                 * free the memory space allocated for the child device
                 * config parsed from VBT
                 */
                if (dev_priv->child_dev && dev_priv->child_dev_num) {
                        kfree(dev_priv->child_dev);
                        dev_priv->child_dev = NULL;
                        dev_priv->child_dev_num = 0;
                }

                vga_switcheroo_unregister_client(dev->pdev);
                vga_client_register(dev->pdev, NULL, NULL, NULL);
        }

        /* Free error state after interrupts are fully disabled. */
        del_timer_sync(&dev_priv->hangcheck_timer);
        cancel_work_sync(&dev_priv->error_work);
        i915_destroy_error_state(dev);

        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);

        intel_opregion_fini(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                /* Flush any outstanding unpin_work. */
                flush_workqueue(dev_priv->wq);

                i915_gem_free_all_phys_object(dev);

                mutex_lock(&dev->struct_mutex);
                i915_gem_cleanup_ringbuffer(dev);
                mutex_unlock(&dev->struct_mutex);
                if (I915_HAS_FBC(dev) && i915_powersave)
                        i915_cleanup_compression(dev);
                drm_mm_takedown(&dev_priv->mm.vram);

                intel_cleanup_overlay(dev);

                if (!I915_NEED_GFX_HWS(dev))
                        i915_free_hws(dev);
        }

        if (dev_priv->regs != NULL)
                iounmap(dev_priv->regs);

        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);

        destroy_workqueue(dev_priv->wq);

        pci_dev_put(dev_priv->bridge_dev);
        kfree(dev->dev_private);

        return 0;
}
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv;

        DRM_DEBUG_DRIVER("\n");
        file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv)
                return -ENOMEM;

        file->driver_priv = file_priv;

        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);

        return 0;
}
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
 * them up in case the X server crashes.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
                drm_fb_helper_restore();
                vga_switcheroo_process_delayed_switch();
                return;
        }

        i915_gem_lastclose(dev);

        if (dev_priv->agp_heap)
                i915_mem_takedown(&(dev_priv->agp_heap));

        i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        i915_gem_release(dev, file_priv);
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;

        kfree(file_priv);
}
struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
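/*
 * The access flags on each entry above gate who may call it: DRM_AUTH
 * requires an authenticated DRM client, DRM_MASTER restricts the ioctl to
 * the current master (typically the X server), DRM_ROOT_ONLY additionally
 * requires CAP_SYS_ADMIN, and DRM_UNLOCKED means the ioctl runs without the
 * global DRM lock.
 */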
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
        return 1;
}