/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
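/*
 * Note: the status page is a single page of system memory that the GPU
 * writes asynchronously (e.g. the breadcrumb values emitted further down);
 * HWS_PGA holds its address.  The gen4+ twiddling below folds the upper
 * physical-address bits (35:32) into bits 7:4 of the register value.
 */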
static int i915_init_phys_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        /* Program Hardware Status Page */
        dev_priv->status_page_dmah =
                drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);

        if (!dev_priv->status_page_dmah) {
                DRM_ERROR("Can not allocate hardware status page\n");
                return -ENOMEM;
        }
        dev_priv->render_ring.status_page.page_addr
                = dev_priv->status_page_dmah->vaddr;
        dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

        memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);

        if (INTEL_INFO(dev)->gen >= 4)
                dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
                                             0xf0;

        I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
        DRM_DEBUG_DRIVER("Enabled hardware status page\n");
        return 0;
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        if (dev_priv->status_page_dmah) {
                drm_pci_free(dev, dev_priv->status_page_dmah);
                dev_priv->status_page_dmah = NULL;
        }

        if (dev_priv->render_ring.status_page.gfx_addr) {
                dev_priv->render_ring.status_page.gfx_addr = 0;
                drm_core_ioremapfree(&dev_priv->hws_map, dev);
        }

        /* Need to rewrite hardware status page */
        I915_WRITE(HWS_PGA, 0x1ffff000);
}

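/*
 * Resynchronise the driver's cached ring head/tail/space with the hardware
 * PRB0 registers after some other agent (e.g. a non-KMS X server) may have
 * driven the ring behind the kernel's back.
 */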
void i915_kernel_lost_context(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
        struct intel_ring_buffer *ring = &dev_priv->render_ring;

        /*
         * We should never lose context on the ring with modesetting
         * as we don't expose it to userspace
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
        ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
                ring->space += ring->size;

        if (!dev->primary->master)
                return;

        master_priv = dev->primary->master->driver_priv;
        if (ring->head == ring->tail && master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        mutex_lock(&dev->struct_mutex);
        intel_cleanup_ring_buffer(&dev_priv->render_ring);
        intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
        intel_cleanup_ring_buffer(&dev_priv->blt_ring);
        mutex_unlock(&dev->struct_mutex);

        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
                i915_free_hws(dev);

        return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        master_priv->sarea = drm_getsarea(dev);
        if (master_priv->sarea) {
                master_priv->sarea_priv = (drm_i915_sarea_t *)
                        ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
        } else {
                DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
        }

        if (init->ring_size != 0) {
                if (dev_priv->render_ring.gem_object != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }

                dev_priv->render_ring.size = init->ring_size;

                dev_priv->render_ring.map.offset = init->ring_start;
                dev_priv->render_ring.map.size = init->ring_size;
                dev_priv->render_ring.map.type = 0;
                dev_priv->render_ring.map.flags = 0;
                dev_priv->render_ring.map.mtrr = 0;

                drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);

                if (dev_priv->render_ring.map.handle == NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("can not ioremap virtual address for"
                                  " ring buffer\n");
                        return -ENOMEM;
                }
        }

        dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;

        dev_priv->cpp = init->cpp;
        dev_priv->back_offset = init->back_offset;
        dev_priv->front_offset = init->front_offset;
        dev_priv->current_page = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->pf_current_page = 0;

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->allow_batchbuffer = 1;

        return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        struct intel_ring_buffer *ring;
        DRM_DEBUG_DRIVER("%s\n", __func__);

        ring = &dev_priv->render_ring;

        if (ring->map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        /* Program Hardware Status Page */
        if (!ring->status_page.page_addr) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG_DRIVER("hw status page @ %p\n",
                         ring->status_page.page_addr);
        if (ring->status_page.gfx_addr != 0)
                intel_ring_setup_status_page(ring);
        else
                I915_WRITE(HWS_PGA, dev_priv->dma_status_page);

        DRM_DEBUG_DRIVER("Enabled hardware status page\n");

        return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_init_t *init = data;
        int retcode = 0;

        switch (init->func) {
        case I915_INIT_DMA:
                retcode = i915_initialize(dev, init);
                break;
        case I915_CLEANUP_DMA:
                retcode = i915_dma_cleanup(dev);
                break;
        case I915_RESUME_DMA:
                retcode = i915_dma_resume(dev);
                break;
        default:
                retcode = -EINVAL;
                break;
        }

        return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
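/*
 * Worked example of the decode below: a 2D-client header such as 0x40000006
 * (bits 31:29 == 0x2) is sized as (cmd & 0xff) + 2 = 8 dwords, MI_NOOP
 * (0x00000000) is a single dword, and anything unrecognised decodes to 0,
 * which aborts the rest of the buffer.
 */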
static int validate_cmd(int cmd)
{
        switch (((cmd >> 29) & 0x7)) {
        case 0x0:
                switch ((cmd >> 23) & 0x3f) {
                case 0x0:
                        return 1;       /* MI_NOOP */
                case 0x4:
                        return 1;       /* MI_FLUSH */
                default:
                        return 0;       /* disallow everything else */
                }
                break;
        case 0x1:
                return 0;       /* reserved */
        case 0x2:
                return (cmd & 0xff) + 2;        /* 2d commands */
        case 0x3:
                if (((cmd >> 24) & 0x1f) <= 0x18)
                        return 1;

                switch ((cmd >> 24) & 0x1f) {
                case 0x1c:
                        return 1;
                case 0x1d:
                        switch ((cmd >> 16) & 0xff) {
                        case 0x3:
                                return (cmd & 0x1f) + 2;
                        case 0x4:
                                return (cmd & 0xf) + 2;
                        default:
                                return (cmd & 0xffff) + 2;
                        }
                case 0x1e:
                        if (cmd & (1 << 23))
                                return (cmd & 0xffff) + 1;
                        else
                                return 1;
                case 0x1f:
                        if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                                return (cmd & 0x1ffff) + 2;
                        else if (cmd & (1 << 17))       /* indirect random */
                                if ((cmd & 0xffff) == 0)
                                        return 0;       /* unknown length, too hard */
                                else
                                        return (((cmd & 0xffff) + 1) / 2) + 1;
                        else
                                return 2;       /* indirect sequential */
                default:
                        return 0;
                }
        default:
                return 0;
        }

        return 0;
}

static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
                return -EINVAL;

        for (i = 0; i < dwords;) {
                int sz = validate_cmd(buffer[i]);
                if (sz == 0 || i + sz > dwords)
                        return -EINVAL;
                i += sz;
        }

        ret = BEGIN_LP_RING((dwords+1)&~1);
        if (ret)
                return ret;

        for (i = 0; i < dwords; i++)
                OUT_RING(buffer[i]);
        if (dwords & 1)
                OUT_RING(0);

        ADVANCE_LP_RING();

        return 0;
}

int
i915_emit_box(struct drm_device *dev,
              struct drm_clip_rect *boxes,
              int i, int DR1, int DR4)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_clip_rect box = boxes[i];
        int ret;

        if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box.x1, box.y1, box.x2, box.y2);
                return -EINVAL;
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                ret = BEGIN_LP_RING(4);
                if (ret)
                        return ret;

                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
                OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
                OUT_RING(DR4);
        } else {
                ret = BEGIN_LP_RING(6);
                if (ret)
                        return ret;

                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(DR1);
                OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
                OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
                OUT_RING(DR4);
                OUT_RING(0);
        }
        ADVANCE_LP_RING();

        return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

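/*
 * The "breadcrumb" is dev_priv->counter stored into the hardware status page
 * (slot I915_BREADCRUMB_INDEX) via MI_STORE_DWORD_INDEX; userspace sees the
 * enqueued value in sarea_priv->last_enqueue and the value the ring has
 * actually reached via READ_BREADCRUMB()/last_dispatch.
 */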
static void i915_emit_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        dev_priv->counter++;
        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }
}

static int i915_dispatch_cmdbuffer(struct drm_device * dev,
                                   drm_i915_cmdbuffer_t *cmd,
                                   struct drm_clip_rect *cliprects,
                                   void *cmdbuf)
{
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;

        if (cmd->sz & 0x3) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, cliprects, i,
                                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }

                ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
                if (ret)
                        return ret;
        }

        i915_emit_breadcrumb(dev);
        return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device * dev,
                                     drm_i915_batchbuffer_t * batch,
                                     struct drm_clip_rect *cliprects)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int nbox = batch->num_cliprects;
        int i, count, ret;

        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;
        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, cliprects, i,
                                            batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }

                if (!IS_I830(dev) && !IS_845G(dev)) {
                        ret = BEGIN_LP_RING(2);
                        if (ret)
                                return ret;

                        if (INTEL_INFO(dev)->gen >= 4) {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
                                OUT_RING(batch->start);
                        } else {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        }
                } else {
                        ret = BEGIN_LP_RING(4);
                        if (ret)
                                return ret;

                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
                }
                ADVANCE_LP_RING();
        }


        if (IS_G4X(dev) || IS_GEN5(dev)) {
                if (BEGIN_LP_RING(2) == 0) {
                        OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
                        OUT_RING(MI_NOOP);
                        ADVANCE_LP_RING();
                }
        }

        i915_emit_breadcrumb(dev);
        return 0;
}

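/*
 * Legacy (non-KMS) page flipping: toggle scanout between the front and back
 * offsets with an asynchronous display-buffer-info packet and mirror the new
 * state into the SAREA for DRI1 clients.
 */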
static int i915_dispatch_flip(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv =
                dev->primary->master->driver_priv;
        int ret;

        if (!master_priv->sarea_priv)
                return -EINVAL;

        DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
                         __func__,
                         dev_priv->current_page,
                         master_priv->sarea_priv->pf_current_page);

        i915_kernel_lost_context(dev);

        ret = BEGIN_LP_RING(10);
        if (ret)
                return ret;

        OUT_RING(MI_FLUSH | MI_READ_FLUSH);
        OUT_RING(0);

        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
        OUT_RING(0);
        if (dev_priv->current_page == 0) {
                OUT_RING(dev_priv->back_offset);
                dev_priv->current_page = 1;
        } else {
                OUT_RING(dev_priv->front_offset);
                dev_priv->current_page = 0;
        }
        OUT_RING(0);

        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
        OUT_RING(0);

        ADVANCE_LP_RING();

        master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }

        master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
        return 0;
}

static int i915_quiescent(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        i915_kernel_lost_context(dev);
        return intel_wait_ring_buffer(&dev_priv->render_ring,
                                      dev_priv->render_ring.size - 8);
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        int ret;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        ret = i915_quiescent(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            master_priv->sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
        int ret;
        struct drm_clip_rect *cliprects = NULL;

        if (!dev_priv->allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return -EINVAL;
        }

        DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
                         batch->start, batch->used, batch->num_cliprects);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (batch->num_cliprects < 0)
                return -EINVAL;

        if (batch->num_cliprects) {
                cliprects = kcalloc(batch->num_cliprects,
                                    sizeof(struct drm_clip_rect),
                                    GFP_KERNEL);
                if (cliprects == NULL)
                        return -ENOMEM;

                ret = copy_from_user(cliprects, batch->cliprects,
                                     batch->num_cliprects *
                                     sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        ret = -EFAULT;
                        goto fail_free;
                }
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
        mutex_unlock(&dev->struct_mutex);

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
        kfree(cliprects);

        return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            master_priv->sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
        struct drm_clip_rect *cliprects = NULL;
        void *batch_data;
        int ret;

        DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                         cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (cmdbuf->num_cliprects < 0)
                return -EINVAL;

        batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
        if (batch_data == NULL)
                return -ENOMEM;

        ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
        if (ret != 0) {
                ret = -EFAULT;
                goto fail_batch_free;
        }

        if (cmdbuf->num_cliprects) {
                cliprects = kcalloc(cmdbuf->num_cliprects,
                                    sizeof(struct drm_clip_rect), GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
                        goto fail_batch_free;
                }

                ret = copy_from_user(cliprects, cmdbuf->cliprects,
                                     cmdbuf->num_cliprects *
                                     sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        ret = -EFAULT;
                        goto fail_clip_free;
                }
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
                goto fail_clip_free;
        }

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
        kfree(cliprects);
fail_batch_free:
        kfree(batch_data);

        return ret;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        int ret;

        DRM_DEBUG_DRIVER("%s\n", __func__);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_flip(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
                value = dev->pdev->irq ? 1 : 0;
                break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
                value = dev_priv->allow_batchbuffer ? 1 : 0;
                break;
        case I915_PARAM_LAST_DISPATCH:
                value = READ_BREADCRUMB(dev_priv);
                break;
        case I915_PARAM_CHIPSET_ID:
                value = dev->pci_device;
                break;
        case I915_PARAM_HAS_GEM:
                value = dev_priv->has_gem;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_PAGEFLIPPING:
                value = 1;
                break;
        case I915_PARAM_HAS_EXECBUF2:
                /* depends on GEM */
                value = dev_priv->has_gem;
                break;
        case I915_PARAM_HAS_BSD:
                value = HAS_BSD(dev);
                break;
        case I915_PARAM_HAS_BLT:
                value = HAS_BLT(dev);
                break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
                return -EINVAL;
        }

        if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
                DRM_ERROR("DRM_COPY_TO_USER failed\n");
                return -EFAULT;
        }

        return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_setparam_t *param = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
                break;
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
                dev_priv->tex_lru_log_granularity = param->value;
                break;
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                dev_priv->allow_batchbuffer = param->value;
                break;
        case I915_SETPARAM_NUM_USED_FENCES:
                if (param->value > dev_priv->num_fence_regs ||
                    param->value < 0)
                        return -EINVAL;
                /* Userspace can use first N regs */
                dev_priv->fence_reg_start = param->value;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown parameter %d\n",
                                 param->param);
                return -EINVAL;
        }

        return 0;
}

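/*
 * Legacy ioctl that lets userspace place the hardware status page in GTT
 * space on chipsets that need it (I915_NEED_GFX_HWS): the caller supplies a
 * GTT offset, which is mapped and written to HWS_PGA in place of a physical
 * address.
 */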
static int i915_set_status_page(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;
        struct intel_ring_buffer *ring = &dev_priv->render_ring;

        if (!I915_NEED_GFX_HWS(dev))
                return -EINVAL;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                WARN(1, "tried to set status page when mode setting active\n");
                return 0;
        }

        DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

        ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

        dev_priv->hws_map.offset = dev->agp->base + hws->addr;
        dev_priv->hws_map.size = 4*1024;
        dev_priv->hws_map.type = 0;
        dev_priv->hws_map.flags = 0;
        dev_priv->hws_map.mtrr = 0;

        drm_core_ioremap_wc(&dev_priv->hws_map, dev);
        if (dev_priv->hws_map.handle == NULL) {
                i915_dma_cleanup(dev);
                ring->status_page.gfx_addr = 0;
                DRM_ERROR("can not ioremap virtual address for"
                          " G33 hw status page\n");
                return -ENOMEM;
        }
        ring->status_page.page_addr = dev_priv->hws_map.handle;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);
        I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

        DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
                         ring->status_page.gfx_addr);
        DRM_DEBUG_DRIVER("load hws at %p\n",
                         ring->status_page.page_addr);
        return 0;
}

static int i915_get_bridge_dev(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
        if (!dev_priv->bridge_dev) {
                DRM_ERROR("bridge device not found\n");
                return -1;
        }
        return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

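/*
 * MCHBAR is the memory controller hub's register window; the config dwords
 * above (0x44 on i915-class parts, 0x48 on i965-class parts) live in the
 * host bridge at 0:0.0, which is why the helpers below go through
 * dev_priv->bridge_dev rather than the GPU's own PCI device.
 */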
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp_lo, temp_hi = 0;
        u64 mchbar_addr;
        int ret;

        if (INTEL_INFO(dev)->gen >= 4)
                pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
        pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
        mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

        /* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
        if (mchbar_addr &&
            pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
                return 0;
#endif

        /* Get some space for it */
        dev_priv->mch_res.name = "i915 MCHBAR";
        dev_priv->mch_res.flags = IORESOURCE_MEM;
        ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
                                     &dev_priv->mch_res,
                                     MCHBAR_SIZE, MCHBAR_SIZE,
                                     PCIBIOS_MIN_MEM,
                                     0, pcibios_align_resource,
                                     dev_priv->bridge_dev);
        if (ret) {
                DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
                dev_priv->mch_res.start = 0;
                return ret;
        }

        if (INTEL_INFO(dev)->gen >= 4)
                pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
                                       upper_32_bits(dev_priv->mch_res.start));

        pci_write_config_dword(dev_priv->bridge_dev, reg,
                               lower_32_bits(dev_priv->mch_res.start));
        return 0;
}

/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
        bool enabled;

        dev_priv->mchbar_need_disable = false;

        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                enabled = !!(temp & DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled)
                return;

        if (intel_alloc_mchbar_resource(dev))
                return;

        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
                                       temp | DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
        }
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;

        if (dev_priv->mchbar_need_disable) {
                if (IS_I915G(dev) || IS_I915GM(dev)) {
                        pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                        temp &= ~DEVEN_MCHBAR_EN;
                        pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
                } else {
                        pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                        temp &= ~1;
                        pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
                }
        }

        if (dev_priv->mch_res.start)
                release_resource(&dev_priv->mch_res);
}

#define PTE_ADDRESS_MASK                0xfffff000
#define PTE_ADDRESS_MASK_HIGH           0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED       (0 << 1)
#define PTE_MAPPING_TYPE_DCACHE         (1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED         (3 << 1)
#define PTE_MAPPING_TYPE_MASK           (3 << 1)
#define PTE_VALID                       (1 << 0)

/**
 * i915_gtt_to_phys - take a GTT address and turn it into a physical one
 * @dev: drm device
 * @gtt_addr: address to translate
 *
 * Some chip functions require allocations from stolen space but need the
 * physical address of the memory in question.  We use this routine
 * to get a physical address suitable for register programming from a given
 * GTT address.
 */
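/*
 * In essence: locate the GTT for this generation, read the PTE that maps
 * gtt_addr, sanity-check PTE_VALID and the mapping type, and rebuild the
 * physical address from PTE_ADDRESS_MASK plus the extended bits in
 * PTE_ADDRESS_MASK_HIGH.
 */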
1016 | static unsigned long i915_gtt_to_phys(struct drm_device *dev, | |
1017 | unsigned long gtt_addr) | |
1018 | { | |
1019 | unsigned long *gtt; | |
1020 | unsigned long entry, phys; | |
a6c45cf0 | 1021 | int gtt_bar = IS_GEN2(dev) ? 1 : 0; |
80824003 JB |
1022 | int gtt_offset, gtt_size; |
1023 | ||
a6c45cf0 CW |
1024 | if (INTEL_INFO(dev)->gen >= 4) { |
1025 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) { | |
80824003 JB |
1026 | gtt_offset = 2*1024*1024; |
1027 | gtt_size = 2*1024*1024; | |
1028 | } else { | |
1029 | gtt_offset = 512*1024; | |
1030 | gtt_size = 512*1024; | |
1031 | } | |
1032 | } else { | |
1033 | gtt_bar = 3; | |
1034 | gtt_offset = 0; | |
1035 | gtt_size = pci_resource_len(dev->pdev, gtt_bar); | |
1036 | } | |
1037 | ||
1038 | gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset, | |
1039 | gtt_size); | |
1040 | if (!gtt) { | |
1041 | DRM_ERROR("ioremap of GTT failed\n"); | |
1042 | return 0; | |
1043 | } | |
1044 | ||
1045 | entry = *(volatile u32 *)(gtt + (gtt_addr / 1024)); | |
1046 | ||
44d98a61 | 1047 | DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry); |
80824003 JB |
1048 | |
1049 | /* Mask out these reserved bits on this hardware. */ | |
a6c45cf0 | 1050 | if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev)) |
80824003 | 1051 | entry &= ~PTE_ADDRESS_MASK_HIGH; |
80824003 JB |
1052 | |
1053 | /* If it's not a mapping type we know, then bail. */ | |
1054 | if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED && | |
1055 | (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) { | |
1056 | iounmap(gtt); | |
1057 | return 0; | |
1058 | } | |
1059 | ||
1060 | if (!(entry & PTE_VALID)) { | |
1061 | DRM_ERROR("bad GTT entry in stolen space\n"); | |
1062 | iounmap(gtt); | |
1063 | return 0; | |
1064 | } | |
1065 | ||
1066 | iounmap(gtt); | |
1067 | ||
1068 | phys =(entry & PTE_ADDRESS_MASK) | | |
1069 | ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4)); | |
1070 | ||
44d98a61 | 1071 | DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys); |
80824003 JB |
1072 | |
1073 | return phys; | |
1074 | } | |
1075 | ||
1076 | static void i915_warn_stolen(struct drm_device *dev) | |
1077 | { | |
1078 | DRM_ERROR("not enough stolen space for compressed buffer, disabling\n"); | |
1079 | DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n"); | |
1080 | } | |
1081 | ||
1082 | static void i915_setup_compression(struct drm_device *dev, int size) | |
1083 | { | |
1084 | struct drm_i915_private *dev_priv = dev->dev_private; | |
132b6aab | 1085 | struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); |
29bd0ae2 AM |
1086 | unsigned long cfb_base; |
1087 | unsigned long ll_base = 0; | |
80824003 JB |
1088 | |
1089 | /* Leave 1M for line length buffer & misc. */ | |
19966754 | 1090 | compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0); |
80824003 | 1091 | if (!compressed_fb) { |
b5e50c3f | 1092 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; |
80824003 JB |
1093 | i915_warn_stolen(dev); |
1094 | return; | |
1095 | } | |
1096 | ||
1097 | compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); | |
1098 | if (!compressed_fb) { | |
1099 | i915_warn_stolen(dev); | |
b5e50c3f | 1100 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; |
80824003 JB |
1101 | return; |
1102 | } | |
1103 | ||
74dff282 JB |
1104 | cfb_base = i915_gtt_to_phys(dev, compressed_fb->start); |
1105 | if (!cfb_base) { | |
1106 | DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); | |
1107 | drm_mm_put_block(compressed_fb); | |
80824003 JB |
1108 | } |
1109 | ||
b52eb4dc | 1110 | if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) { |
19966754 | 1111 | compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096, |
74dff282 JB |
1112 | 4096, 0); |
1113 | if (!compressed_llb) { | |
1114 | i915_warn_stolen(dev); | |
1115 | return; | |
1116 | } | |
1117 | ||
1118 | compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096); | |
1119 | if (!compressed_llb) { | |
1120 | i915_warn_stolen(dev); | |
1121 | return; | |
1122 | } | |
1123 | ||
1124 | ll_base = i915_gtt_to_phys(dev, compressed_llb->start); | |
1125 | if (!ll_base) { | |
1126 | DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); | |
1127 | drm_mm_put_block(compressed_fb); | |
1128 | drm_mm_put_block(compressed_llb); | |
1129 | } | |
80824003 JB |
1130 | } |
1131 | ||
1132 | dev_priv->cfb_size = size; | |
1133 | ||
ee5382ae | 1134 | intel_disable_fbc(dev); |
20bf377e | 1135 | dev_priv->compressed_fb = compressed_fb; |
b52eb4dc ZY |
1136 | if (IS_IRONLAKE_M(dev)) |
1137 | I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); | |
1138 | else if (IS_GM45(dev)) { | |
74dff282 JB |
1139 | I915_WRITE(DPFC_CB_BASE, compressed_fb->start); |
1140 | } else { | |
74dff282 JB |
1141 | I915_WRITE(FBC_CFB_BASE, cfb_base); |
1142 | I915_WRITE(FBC_LL_BASE, ll_base); | |
20bf377e | 1143 | dev_priv->compressed_llb = compressed_llb; |
80824003 JB |
1144 | } |
1145 | ||
b52eb4dc | 1146 | DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, |
80824003 | 1147 | ll_base, size >> 20); |
80824003 JB |
1148 | } |
1149 | ||
20bf377e JB |
1150 | static void i915_cleanup_compression(struct drm_device *dev) |
1151 | { | |
1152 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1153 | ||
1154 | drm_mm_put_block(dev_priv->compressed_fb); | |
aebf0daf | 1155 | if (dev_priv->compressed_llb) |
20bf377e JB |
1156 | drm_mm_put_block(dev_priv->compressed_llb); |
1157 | } | |
1158 | ||
28d52043 DA |
1159 | /* true = enable decode, false = disable decoder */ |
1160 | static unsigned int i915_vga_set_decode(void *cookie, bool state) | |
1161 | { | |
1162 | struct drm_device *dev = cookie; | |
1163 | ||
1164 | intel_modeset_vga_set_state(dev, state); | |
1165 | if (state) | |
1166 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | |
1167 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | |
1168 | else | |
1169 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | |
1170 | } | |
1171 | ||
6a9ee8af DA |
1172 | static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) |
1173 | { | |
1174 | struct drm_device *dev = pci_get_drvdata(pdev); | |
1175 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | |
1176 | if (state == VGA_SWITCHEROO_ON) { | |
fbf81762 | 1177 | printk(KERN_INFO "i915: switched on\n"); |
6a9ee8af DA |
1178 | /* i915 resume handler doesn't set to D0 */ |
1179 | pci_set_power_state(dev->pdev, PCI_D0); | |
1180 | i915_resume(dev); | |
1181 | } else { | |
1182 | printk(KERN_ERR "i915: switched off\n"); | |
1183 | i915_suspend(dev, pmm); | |
1184 | } | |
1185 | } | |
1186 | ||
1187 | static bool i915_switcheroo_can_switch(struct pci_dev *pdev) | |
1188 | { | |
1189 | struct drm_device *dev = pci_get_drvdata(pdev); | |
1190 | bool can_switch; | |
1191 | ||
1192 | spin_lock(&dev->count_lock); | |
1193 | can_switch = (dev->open_count == 0); | |
1194 | spin_unlock(&dev->count_lock); | |
1195 | return can_switch; | |
1196 | } | |
1197 | ||
53984635 | 1198 | static int i915_load_modeset_init(struct drm_device *dev) |
79e53945 JB |
1199 | { |
1200 | struct drm_i915_private *dev_priv = dev->dev_private; | |
53984635 | 1201 | unsigned long prealloc_size, gtt_size, mappable_size; |
79e53945 JB |
1202 | int ret = 0; |
1203 | ||
53984635 DV |
1204 | prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT; |
1205 | gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT; | |
1206 | mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | |
1207 | gtt_size -= PAGE_SIZE; | |
1208 | ||
19966754 DV |
1209 | /* Basic memrange allocator for stolen space (aka mm.vram) */ |
1210 | drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size); | |
79e53945 | 1211 | |
13f4c435 EA |
1212 | /* Let GEM Manage from end of prealloc space to end of aperture. |
1213 | * | |
1214 | * However, leave one page at the end still bound to the scratch page. | |
1215 | * There are a number of places where the hardware apparently | |
1216 | * prefetches past the end of the object, and we've seen multiple | |
1217 | * hangs with the GPU head pointer stuck in a batchbuffer bound | |
1218 | * at the last page of the aperture. One page should be enough to | |
1219 | * keep any prefetching inside of the aperture. | |
1220 | */ | |
53984635 | 1221 | i915_gem_do_init(dev, prealloc_size, mappable_size, gtt_size); |
79e53945 | 1222 | |
11ed50ec | 1223 | mutex_lock(&dev->struct_mutex); |
79e53945 | 1224 | ret = i915_gem_init_ringbuffer(dev); |
11ed50ec | 1225 | mutex_unlock(&dev->struct_mutex); |
79e53945 | 1226 | if (ret) |
b8da7de5 | 1227 | goto out; |
79e53945 | 1228 | |
80824003 | 1229 | /* Try to set up FBC with a reasonable compressed buffer size */ |
9216d44d | 1230 | if (I915_HAS_FBC(dev) && i915_powersave) { |
80824003 JB |
1231 | int cfb_size; |
1232 | ||
1233 | /* Try to get an 8M buffer... */ | |
1234 | if (prealloc_size > (9*1024*1024)) | |
1235 | cfb_size = 8*1024*1024; | |
1236 | else /* fall back to 7/8 of the stolen space */ | |
1237 | cfb_size = prealloc_size * 7 / 8; | |
1238 | i915_setup_compression(dev, cfb_size); | |
1239 | } | |
1240 | ||
79e53945 JB |
1241 | /* Allow hardware batchbuffers unless told otherwise. |
1242 | */ | |
1243 | dev_priv->allow_batchbuffer = 1; | |
1244 | ||
6d139a87 | 1245 | ret = intel_parse_bios(dev); |
79e53945 JB |
1246 | if (ret) |
1247 | DRM_INFO("failed to find VBIOS tables\n"); | |
1248 | ||
28d52043 DA |
1249 | /* if we have > 1 VGA cards, then disable the radeon VGA resources */ |
1250 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); | |
1251 | if (ret) | |
5a79395b | 1252 | goto cleanup_ringbuffer; |
28d52043 | 1253 | |
723bfd70 JB |
1254 | intel_register_dsm_handler(); |
1255 | ||
6a9ee8af DA |
1256 | ret = vga_switcheroo_register_client(dev->pdev, |
1257 | i915_switcheroo_set_state, | |
1258 | i915_switcheroo_can_switch); | |
1259 | if (ret) | |
5a79395b | 1260 | goto cleanup_vga_client; |
6a9ee8af | 1261 | |
1afe3e9d JB |
1262 | /* IIR "flip pending" bit means done if this bit is set */ |
1263 | if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE)) | |
1264 | dev_priv->flip_pending_is_done = true; | |
1265 | ||
b01f2c3a JB |
1266 | intel_modeset_init(dev); |
1267 | ||
79e53945 JB |
1268 | ret = drm_irq_install(dev); |
1269 | if (ret) | |
5a79395b | 1270 | goto cleanup_vga_switcheroo; |
79e53945 | 1271 | |
79e53945 JB |
1272 | /* Always safe in the mode setting case. */ |
1273 | /* FIXME: do pre/post-mode set stuff in core KMS code */ | |
1274 | dev->vblank_disable_allowed = 1; | |
1275 | ||
5a79395b CW |
1276 | ret = intel_fbdev_init(dev); |
1277 | if (ret) | |
1278 | goto cleanup_irq; | |
1279 | ||
eb1f8e4f | 1280 | drm_kms_helper_poll_init(dev); |
87acb0a5 CW |
1281 | |
1282 | /* We're off and running w/KMS */ | |
1283 | dev_priv->mm.suspended = 0; | |
1284 | ||
79e53945 JB |
1285 | return 0; |
1286 | ||
5a79395b CW |
1287 | cleanup_irq: |
1288 | drm_irq_uninstall(dev); | |
1289 | cleanup_vga_switcheroo: | |
1290 | vga_switcheroo_unregister_client(dev->pdev); | |
1291 | cleanup_vga_client: | |
1292 | vga_client_register(dev->pdev, NULL, NULL, NULL); | |
1293 | cleanup_ringbuffer: | |
21099537 | 1294 | mutex_lock(&dev->struct_mutex); |
79e53945 | 1295 | i915_gem_cleanup_ringbuffer(dev); |
21099537 | 1296 | mutex_unlock(&dev->struct_mutex); |
79e53945 JB |
1297 | out: |
1298 | return ret; | |
1299 | } | |
1300 | ||
7c1c2871 DA |
1301 | int i915_master_create(struct drm_device *dev, struct drm_master *master) |
1302 | { | |
1303 | struct drm_i915_master_private *master_priv; | |
1304 | ||
9a298b2a | 1305 | master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); |
7c1c2871 DA |
1306 | if (!master_priv) |
1307 | return -ENOMEM; | |
1308 | ||
1309 | master->driver_priv = master_priv; | |
1310 | return 0; | |
1311 | } | |
1312 | ||
1313 | void i915_master_destroy(struct drm_device *dev, struct drm_master *master) | |
1314 | { | |
1315 | struct drm_i915_master_private *master_priv = master->driver_priv; | |
1316 | ||
1317 | if (!master_priv) | |
1318 | return; | |
1319 | ||
9a298b2a | 1320 | kfree(master_priv); |
7c1c2871 DA |
1321 | |
1322 | master->driver_priv = NULL; | |
1323 | } | |
1324 | ||
7648fa99 | 1325 | static void i915_pineview_get_mem_freq(struct drm_device *dev) |
7662c8bd SL |
1326 | { |
1327 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1328 | u32 tmp; | |
1329 | ||
7662c8bd SL |
1330 | tmp = I915_READ(CLKCFG); |
1331 | ||
1332 | switch (tmp & CLKCFG_FSB_MASK) { | |
1333 | case CLKCFG_FSB_533: | |
1334 | dev_priv->fsb_freq = 533; /* 133*4 */ | |
1335 | break; | |
1336 | case CLKCFG_FSB_800: | |
1337 | dev_priv->fsb_freq = 800; /* 200*4 */ | |
1338 | break; | |
1339 | case CLKCFG_FSB_667: | |
1340 | dev_priv->fsb_freq = 667; /* 167*4 */ | |
1341 | break; | |
1342 | case CLKCFG_FSB_400: | |
1343 | dev_priv->fsb_freq = 400; /* 100*4 */ | |
1344 | break; | |
1345 | } | |
1346 | ||
1347 | switch (tmp & CLKCFG_MEM_MASK) { | |
1348 | case CLKCFG_MEM_533: | |
1349 | dev_priv->mem_freq = 533; | |
1350 | break; | |
1351 | case CLKCFG_MEM_667: | |
1352 | dev_priv->mem_freq = 667; | |
1353 | break; | |
1354 | case CLKCFG_MEM_800: | |
1355 | dev_priv->mem_freq = 800; | |
1356 | break; | |
1357 | } | |
95534263 LP |
1358 | |
1359 | /* detect pineview DDR3 setting */ | |
1360 | tmp = I915_READ(CSHRDDR3CTL); | |
1361 | dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; | |
7662c8bd SL |
1362 | } |
1363 | ||
7648fa99 JB |
1364 | static void i915_ironlake_get_mem_freq(struct drm_device *dev) |
1365 | { | |
1366 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1367 | u16 ddrpll, csipll; | |
1368 | ||
1369 | ddrpll = I915_READ16(DDRMPLL1); | |
1370 | csipll = I915_READ16(CSIPLL0); | |
1371 | ||
1372 | switch (ddrpll & 0xff) { | |
1373 | case 0xc: | |
1374 | dev_priv->mem_freq = 800; | |
1375 | break; | |
1376 | case 0x10: | |
1377 | dev_priv->mem_freq = 1066; | |
1378 | break; | |
1379 | case 0x14: | |
1380 | dev_priv->mem_freq = 1333; | |
1381 | break; | |
1382 | case 0x18: | |
1383 | dev_priv->mem_freq = 1600; | |
1384 | break; | |
1385 | default: | |
1386 | DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", | |
1387 | ddrpll & 0xff); | |
1388 | dev_priv->mem_freq = 0; | |
1389 | break; | |
1390 | } | |
1391 | ||
1392 | dev_priv->r_t = dev_priv->mem_freq; | |
1393 | ||
1394 | switch (csipll & 0x3ff) { | |
1395 | case 0x00c: | |
1396 | dev_priv->fsb_freq = 3200; | |
1397 | break; | |
1398 | case 0x00e: | |
1399 | dev_priv->fsb_freq = 3733; | |
1400 | break; | |
1401 | case 0x010: | |
1402 | dev_priv->fsb_freq = 4266; | |
1403 | break; | |
1404 | case 0x012: | |
1405 | dev_priv->fsb_freq = 4800; | |
1406 | break; | |
1407 | case 0x014: | |
1408 | dev_priv->fsb_freq = 5333; | |
1409 | break; | |
1410 | case 0x016: | |
1411 | dev_priv->fsb_freq = 5866; | |
1412 | break; | |
1413 | case 0x018: | |
1414 | dev_priv->fsb_freq = 6400; | |
1415 | break; | |
1416 | default: | |
1417 | DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", | |
1418 | csipll & 0x3ff); | |
1419 | dev_priv->fsb_freq = 0; | |
1420 | break; | |
1421 | } | |
1422 | ||
1423 | if (dev_priv->fsb_freq == 3200) { | |
1424 | dev_priv->c_m = 0; | |
1425 | } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { | |
1426 | dev_priv->c_m = 1; | |
1427 | } else { | |
1428 | dev_priv->c_m = 2; | |
1429 | } | |
1430 | } | |
1431 | ||
1432 | struct v_table { | |
1433 | u8 vid; | |
1434 | unsigned long vd; /* in .1 mil */ | |
1435 | unsigned long vm; /* in .1 mil */ | |
1436 | u8 pvid; | |
1437 | }; | |
1438 | ||
1439 | static struct v_table v_table[] = { | |
1440 | { 0, 16125, 15000, 0x7f, }, | |
1441 | { 1, 16000, 14875, 0x7e, }, | |
1442 | { 2, 15875, 14750, 0x7d, }, | |
1443 | { 3, 15750, 14625, 0x7c, }, | |
1444 | { 4, 15625, 14500, 0x7b, }, | |
1445 | { 5, 15500, 14375, 0x7a, }, | |
1446 | { 6, 15375, 14250, 0x79, }, | |
1447 | { 7, 15250, 14125, 0x78, }, | |
1448 | { 8, 15125, 14000, 0x77, }, | |
1449 | { 9, 15000, 13875, 0x76, }, | |
1450 | { 10, 14875, 13750, 0x75, }, | |
1451 | { 11, 14750, 13625, 0x74, }, | |
1452 | { 12, 14625, 13500, 0x73, }, | |
1453 | { 13, 14500, 13375, 0x72, }, | |
1454 | { 14, 14375, 13250, 0x71, }, | |
1455 | { 15, 14250, 13125, 0x70, }, | |
1456 | { 16, 14125, 13000, 0x6f, }, | |
1457 | { 17, 14000, 12875, 0x6e, }, | |
1458 | { 18, 13875, 12750, 0x6d, }, | |
1459 | { 19, 13750, 12625, 0x6c, }, | |
1460 | { 20, 13625, 12500, 0x6b, }, | |
1461 | { 21, 13500, 12375, 0x6a, }, | |
1462 | { 22, 13375, 12250, 0x69, }, | |
1463 | { 23, 13250, 12125, 0x68, }, | |
1464 | { 24, 13125, 12000, 0x67, }, | |
1465 | { 25, 13000, 11875, 0x66, }, | |
1466 | { 26, 12875, 11750, 0x65, }, | |
1467 | { 27, 12750, 11625, 0x64, }, | |
1468 | { 28, 12625, 11500, 0x63, }, | |
1469 | { 29, 12500, 11375, 0x62, }, | |
1470 | { 30, 12375, 11250, 0x61, }, | |
1471 | { 31, 12250, 11125, 0x60, }, | |
1472 | { 32, 12125, 11000, 0x5f, }, | |
1473 | { 33, 12000, 10875, 0x5e, }, | |
1474 | { 34, 11875, 10750, 0x5d, }, | |
1475 | { 35, 11750, 10625, 0x5c, }, | |
1476 | { 36, 11625, 10500, 0x5b, }, | |
1477 | { 37, 11500, 10375, 0x5a, }, | |
1478 | { 38, 11375, 10250, 0x59, }, | |
1479 | { 39, 11250, 10125, 0x58, }, | |
1480 | { 40, 11125, 10000, 0x57, }, | |
1481 | { 41, 11000, 9875, 0x56, }, | |
1482 | { 42, 10875, 9750, 0x55, }, | |
1483 | { 43, 10750, 9625, 0x54, }, | |
1484 | { 44, 10625, 9500, 0x53, }, | |
1485 | { 45, 10500, 9375, 0x52, }, | |
1486 | { 46, 10375, 9250, 0x51, }, | |
1487 | { 47, 10250, 9125, 0x50, }, | |
1488 | { 48, 10125, 9000, 0x4f, }, | |
1489 | { 49, 10000, 8875, 0x4e, }, | |
1490 | { 50, 9875, 8750, 0x4d, }, | |
1491 | { 51, 9750, 8625, 0x4c, }, | |
1492 | { 52, 9625, 8500, 0x4b, }, | |
1493 | { 53, 9500, 8375, 0x4a, }, | |
1494 | { 54, 9375, 8250, 0x49, }, | |
1495 | { 55, 9250, 8125, 0x48, }, | |
1496 | { 56, 9125, 8000, 0x47, }, | |
1497 | { 57, 9000, 7875, 0x46, }, | |
1498 | { 58, 8875, 7750, 0x45, }, | |
1499 | { 59, 8750, 7625, 0x44, }, | |
1500 | { 60, 8625, 7500, 0x43, }, | |
1501 | { 61, 8500, 7375, 0x42, }, | |
1502 | { 62, 8375, 7250, 0x41, }, | |
1503 | { 63, 8250, 7125, 0x40, }, | |
1504 | { 64, 8125, 7000, 0x3f, }, | |
1505 | { 65, 8000, 6875, 0x3e, }, | |
1506 | { 66, 7875, 6750, 0x3d, }, | |
1507 | { 67, 7750, 6625, 0x3c, }, | |
1508 | { 68, 7625, 6500, 0x3b, }, | |
1509 | { 69, 7500, 6375, 0x3a, }, | |
1510 | { 70, 7375, 6250, 0x39, }, | |
1511 | { 71, 7250, 6125, 0x38, }, | |
1512 | { 72, 7125, 6000, 0x37, }, | |
1513 | { 73, 7000, 5875, 0x36, }, | |
1514 | { 74, 6875, 5750, 0x35, }, | |
1515 | { 75, 6750, 5625, 0x34, }, | |
1516 | { 76, 6625, 5500, 0x33, }, | |
1517 | { 77, 6500, 5375, 0x32, }, | |
1518 | { 78, 6375, 5250, 0x31, }, | |
1519 | { 79, 6250, 5125, 0x30, }, | |
1520 | { 80, 6125, 5000, 0x2f, }, | |
1521 | { 81, 6000, 4875, 0x2e, }, | |
1522 | { 82, 5875, 4750, 0x2d, }, | |
1523 | { 83, 5750, 4625, 0x2c, }, | |
1524 | { 84, 5625, 4500, 0x2b, }, | |
1525 | { 85, 5500, 4375, 0x2a, }, | |
1526 | { 86, 5375, 4250, 0x29, }, | |
1527 | { 87, 5250, 4125, 0x28, }, | |
1528 | { 88, 5125, 4000, 0x27, }, | |
1529 | { 89, 5000, 3875, 0x26, }, | |
1530 | { 90, 4875, 3750, 0x25, }, | |
1531 | { 91, 4750, 3625, 0x24, }, | |
1532 | { 92, 4625, 3500, 0x23, }, | |
1533 | { 93, 4500, 3375, 0x22, }, | |
1534 | { 94, 4375, 3250, 0x21, }, | |
1535 | { 95, 4250, 3125, 0x20, }, | |
1536 | { 96, 4125, 3000, 0x1f, }, | |
1537 | { 97, 4125, 3000, 0x1e, }, | |
1538 | { 98, 4125, 3000, 0x1d, }, | |
1539 | { 99, 4125, 3000, 0x1c, }, | |
1540 | { 100, 4125, 3000, 0x1b, }, | |
1541 | { 101, 4125, 3000, 0x1a, }, | |
1542 | { 102, 4125, 3000, 0x19, }, | |
1543 | { 103, 4125, 3000, 0x18, }, | |
1544 | { 104, 4125, 3000, 0x17, }, | |
1545 | { 105, 4125, 3000, 0x16, }, | |
1546 | { 106, 4125, 3000, 0x15, }, | |
1547 | { 107, 4125, 3000, 0x14, }, | |
1548 | { 108, 4125, 3000, 0x13, }, | |
1549 | { 109, 4125, 3000, 0x12, }, | |
1550 | { 110, 4125, 3000, 0x11, }, | |
1551 | { 111, 4125, 3000, 0x10, }, | |
1552 | { 112, 4125, 3000, 0x0f, }, | |
1553 | { 113, 4125, 3000, 0x0e, }, | |
1554 | { 114, 4125, 3000, 0x0d, }, | |
1555 | { 115, 4125, 3000, 0x0c, }, | |
1556 | { 116, 4125, 3000, 0x0b, }, | |
1557 | { 117, 4125, 3000, 0x0a, }, | |
1558 | { 118, 4125, 3000, 0x09, }, | |
1559 | { 119, 4125, 3000, 0x08, }, | |
1560 | { 120, 1125, 0, 0x07, }, | |
1561 | { 121, 1000, 0, 0x06, }, | |
1562 | { 122, 875, 0, 0x05, }, | |
1563 | { 123, 750, 0, 0x04, }, | |
1564 | { 124, 625, 0, 0x03, }, | |
1565 | { 125, 500, 0, 0x02, }, | |
1566 | { 126, 375, 0, 0x01, }, | |
1567 | { 127, 0, 0, 0x00, }, | |
1568 | }; | |
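/*
 * Editorial note: each v_table row maps a 7-bit PXVID code (pvid) back
 * to its VID and to the desktop (vd) / mobile (vm) voltage, apparently
 * in tenths of a millivolt -- e.g. pvid 0x7f gives vd = 16125 (~1.6125 V)
 * and vm = 15000 (~1.5000 V).  The unit reading is an assumption based
 * on the ".1 mil" comment and the magnitudes involved.
 */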
1569 | ||
1570 | struct cparams { | |
1571 | int i; | |
1572 | int t; | |
1573 | int m; | |
1574 | int c; | |
1575 | }; | |
1576 | ||
1577 | static struct cparams cparams[] = { | |
1578 | { 1, 1333, 301, 28664 }, | |
1579 | { 1, 1066, 294, 24460 }, | |
1580 | { 1, 800, 294, 25192 }, | |
1581 | { 0, 1333, 276, 27605 }, | |
1582 | { 0, 1066, 276, 27605 }, | |
1583 | { 0, 800, 231, 23784 }, | |
1584 | }; | |
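/*
 * Editorial note: cparams[] is keyed on the (c_m, r_t) pair derived in
 * i915_ironlake_get_mem_freq() and supplies the slope m and offset c
 * used below, i.e. roughly chipset_val = (m * counts_per_ms + c) / 10.
 */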
1585 | ||
1586 | unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) | |
1587 | { | |
1588 | u64 total_count, diff, ret; | |
1589 | u32 count1, count2, count3, m = 0, c = 0; | |
1590 | unsigned long now = jiffies_to_msecs(jiffies), diff1; | |
1591 | int i; | |
1592 | ||
1593 | diff1 = now - dev_priv->last_time1; | |
1594 | ||
1595 | count1 = I915_READ(DMIEC); | |
1596 | count2 = I915_READ(DDREC); | |
1597 | count3 = I915_READ(CSIEC); | |
1598 | ||
1599 | total_count = count1 + count2 + count3; | |
1600 | ||
1601 | /* FIXME: handle per-counter overflow */ | |
1602 | if (total_count < dev_priv->last_count1) { | |
1603 | diff = ~0UL - dev_priv->last_count1; | |
1604 | diff += total_count; | |
1605 | } else { | |
1606 | diff = total_count - dev_priv->last_count1; | |
1607 | } | |
1608 | ||
1609 | for (i = 0; i < ARRAY_SIZE(cparams); i++) { | |
1610 | if (cparams[i].i == dev_priv->c_m && | |
1611 | cparams[i].t == dev_priv->r_t) { | |
1612 | m = cparams[i].m; | |
1613 | c = cparams[i].c; | |
1614 | break; | |
1615 | } | |
1616 | } | |
1617 | ||
d270ae34 | 1618 | diff = div_u64(diff, diff1); |
7648fa99 | 1619 | ret = ((m * diff) + c); |
d270ae34 | 1620 | ret = div_u64(ret, 10); |
7648fa99 JB |
1621 | |
1622 | dev_priv->last_count1 = total_count; | |
1623 | dev_priv->last_time1 = now; | |
1624 | ||
1625 | return ret; | |
1626 | } | |
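/*
 * Editorial worked example (illustrative numbers only): with c_m = 1 and
 * r_t = 1066 the table gives m = 294, c = 24460.  If DMIEC + DDREC + CSIEC
 * advanced by a combined 10000 over a 100 ms window, diff becomes
 * 10000 / 100 = 100 and the function returns (294 * 100 + 24460) / 10 = 5386.
 */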
1627 | ||
1628 | unsigned long i915_mch_val(struct drm_i915_private *dev_priv) | |
1629 | { | |
1630 | unsigned long m, x, b; | |
1631 | u32 tsfs; | |
1632 | ||
1633 | tsfs = I915_READ(TSFS); | |
1634 | ||
1635 | m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); | |
1636 | x = I915_READ8(TR1); | |
1637 | ||
1638 | b = tsfs & TSFS_INTR_MASK; | |
1639 | ||
1640 | return ((m * x) / 127) - b; | |
1641 | } | |
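/*
 * Editorial note: TSFS supplies the slope (m) and offset (b) and TR1 the
 * raw reading (x), so the result is (m * x) / 127 - b.  For instance,
 * m = 100, x = 90, b = 50 (purely illustrative register values) gives
 * (100 * 90) / 127 - 50 = 70 - 50 = 20.
 */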
1642 | ||
1643 | static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) | |
1644 | { | |
1645 | unsigned long val = 0; | |
1646 | int i; | |
1647 | ||
1648 | for (i = 0; i < ARRAY_SIZE(v_table); i++) { | |
1649 | if (v_table[i].pvid == pxvid) { | |
1650 | if (IS_MOBILE(dev_priv->dev)) | |
1651 | val = v_table[i].vm; | |
1652 | else | |
1653 | val = v_table[i].vd; | |
1654 | } | |
1655 | } | |
1656 | ||
1657 | return val; | |
1658 | } | |
1659 | ||
1660 | void i915_update_gfx_val(struct drm_i915_private *dev_priv) | |
1661 | { | |
1662 | struct timespec now, diff1; | |
1663 | u64 diff; | |
1664 | unsigned long diffms; | |
1665 | u32 count; | |
1666 | ||
1667 | getrawmonotonic(&now); | |
1668 | diff1 = timespec_sub(now, dev_priv->last_time2); | |
1669 | ||
1670 | /* Don't divide by 0 */ | |
1671 | diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; | |
1672 | if (!diffms) | |
1673 | return; | |
1674 | ||
1675 | count = I915_READ(GFXEC); | |
1676 | ||
1677 | if (count < dev_priv->last_count2) { | |
1678 | diff = ~0UL - dev_priv->last_count2; | |
1679 | diff += count; | |
1680 | } else { | |
1681 | diff = count - dev_priv->last_count2; | |
1682 | } | |
1683 | ||
1684 | dev_priv->last_count2 = count; | |
1685 | dev_priv->last_time2 = now; | |
1686 | ||
1687 | /* More magic constants... */ | |
1688 | diff = diff * 1181; | |
d270ae34 | 1689 | diff = div_u64(diff, diffms * 10); |
7648fa99 JB |
1690 | dev_priv->gfx_power = diff; |
1691 | } | |
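/*
 * Editorial note: gfx_power is the GFXEC delta scaled by the magic 1181
 * factor and divided by (elapsed_ms * 10); e.g. a delta of 1000 counts
 * over 100 ms yields 1000 * 1181 / (100 * 10) = 1181.
 */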
1692 | ||
1693 | unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) | |
1694 | { | |
1695 | unsigned long t, corr, state1, corr2, state2; | |
1696 | u32 pxvid, ext_v; | |
1697 | ||
1698 | pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); | |
1699 | pxvid = (pxvid >> 24) & 0x7f; | |
1700 | ext_v = pvid_to_extvid(dev_priv, pxvid); | |
1701 | ||
1702 | state1 = ext_v; | |
1703 | ||
1704 | t = i915_mch_val(dev_priv); | |
1705 | ||
1706 | /* Revel in the empirically derived constants */ | |
1707 | ||
1708 | /* Correction factor in 1/100000 units */ | |
1709 | if (t > 80) | |
1710 | corr = ((t * 2349) + 135940); | |
1711 | else if (t >= 50) | |
1712 | corr = ((t * 964) + 29317); | |
1713 | else /* < 50 */ | |
1714 | corr = ((t * 301) + 1004); | |
1715 | ||
1716 | corr = corr * ((150142 * state1) / 10000 - 78642); | |
1717 | corr /= 100000; | |
1718 | corr2 = (corr * dev_priv->corr); | |
1719 | ||
1720 | state2 = (corr2 * state1) / 10000; | |
1721 | state2 /= 100; /* convert to mW */ | |
1722 | ||
1723 | i915_update_gfx_val(dev_priv); | |
1724 | ||
1725 | return dev_priv->gfx_power + state2; | |
1726 | } | |
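/*
 * Editorial note on the pipeline above: the current voltage (state1, from
 * PXVFREQ via the v_table) is combined with a correction factor chosen by
 * the i915_mch_val() reading, scaled by the board-specific dev_priv->corr,
 * and converted to mW (state2); the counter-derived gfx_power from
 * i915_update_gfx_val() is then added on top.
 */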
1727 | ||
1728 | /* Global for IPS driver to get at the current i915 device */ | |
1729 | static struct drm_i915_private *i915_mch_dev; | |
1730 | /* | |
1731 | * Lock protecting IPS related data structures | |
1732 | * - i915_mch_dev | |
1733 | * - dev_priv->max_delay | |
1734 | * - dev_priv->min_delay | |
1735 | * - dev_priv->fmax | |
1736 | * - dev_priv->gpu_busy | |
1737 | */ | |
995b6762 | 1738 | static DEFINE_SPINLOCK(mchdev_lock); |
7648fa99 JB |
1739 | |
1740 | /** | |
1741 | * i915_read_mch_val - return value for IPS use | |
1742 | * | |
1743 | * Calculate and return a value for the IPS driver to use when deciding whether | |
1744 | * we have thermal and power headroom to increase CPU or GPU power budget. | |
1745 | */ | |
1746 | unsigned long i915_read_mch_val(void) | |
1747 | { | |
1748 | struct drm_i915_private *dev_priv; | |
1749 | unsigned long chipset_val, graphics_val, ret = 0; | |
1750 | ||
1751 | spin_lock(&mchdev_lock); | |
1752 | if (!i915_mch_dev) | |
1753 | goto out_unlock; | |
1754 | dev_priv = i915_mch_dev; | |
1755 | ||
1756 | chipset_val = i915_chipset_val(dev_priv); | |
1757 | graphics_val = i915_gfx_val(dev_priv); | |
1758 | ||
1759 | ret = chipset_val + graphics_val; | |
1760 | ||
1761 | out_unlock: | |
1762 | spin_unlock(&mchdev_lock); | |
1763 | ||
1764 | return ret; | |
1765 | } | |
1766 | EXPORT_SYMBOL_GPL(i915_read_mch_val); | |
1767 | ||
1768 | /** | |
1769 | * i915_gpu_raise - raise GPU frequency limit | |
1770 | * | |
1771 | * Raise the limit; IPS indicates we have thermal headroom. | |
1772 | */ | |
1773 | bool i915_gpu_raise(void) | |
1774 | { | |
1775 | struct drm_i915_private *dev_priv; | |
1776 | bool ret = true; | |
1777 | ||
1778 | spin_lock(&mchdev_lock); | |
1779 | if (!i915_mch_dev) { | |
1780 | ret = false; | |
1781 | goto out_unlock; | |
1782 | } | |
1783 | dev_priv = i915_mch_dev; | |
1784 | ||
1785 | if (dev_priv->max_delay > dev_priv->fmax) | |
1786 | dev_priv->max_delay--; | |
1787 | ||
1788 | out_unlock: | |
1789 | spin_unlock(&mchdev_lock); | |
1790 | ||
1791 | return ret; | |
1792 | } | |
1793 | EXPORT_SYMBOL_GPL(i915_gpu_raise); | |
1794 | ||
1795 | /** | |
1796 | * i915_gpu_lower - lower GPU frequency limit | |
1797 | * | |
1798 | * IPS indicates we're close to a thermal limit, so throttle back the GPU | |
1799 | * frequency maximum. | |
1800 | */ | |
1801 | bool i915_gpu_lower(void) | |
1802 | { | |
1803 | struct drm_i915_private *dev_priv; | |
1804 | bool ret = true; | |
1805 | ||
1806 | spin_lock(&mchdev_lock); | |
1807 | if (!i915_mch_dev) { | |
1808 | ret = false; | |
1809 | goto out_unlock; | |
1810 | } | |
1811 | dev_priv = i915_mch_dev; | |
1812 | ||
1813 | if (dev_priv->max_delay < dev_priv->min_delay) | |
1814 | dev_priv->max_delay++; | |
1815 | ||
1816 | out_unlock: | |
1817 | spin_unlock(&mchdev_lock); | |
1818 | ||
1819 | return ret; | |
1820 | } | |
1821 | EXPORT_SYMBOL_GPL(i915_gpu_lower); | |
1822 | ||
1823 | /** | |
1824 | * i915_gpu_busy - report GPU busy status to IPS | |
1825 | * | |
1826 | * Tell the IPS driver whether or not the GPU is busy. | |
1827 | */ | |
1828 | bool i915_gpu_busy(void) | |
1829 | { | |
1830 | struct drm_i915_private *dev_priv; | |
1831 | bool ret = false; | |
1832 | ||
1833 | spin_lock(&mchdev_lock); | |
1834 | if (!i915_mch_dev) | |
1835 | goto out_unlock; | |
1836 | dev_priv = i915_mch_dev; | |
1837 | ||
1838 | ret = dev_priv->busy; | |
1839 | ||
1840 | out_unlock: | |
1841 | spin_unlock(&mchdev_lock); | |
1842 | ||
1843 | return ret; | |
1844 | } | |
1845 | EXPORT_SYMBOL_GPL(i915_gpu_busy); | |
1846 | ||
1847 | /** | |
1848 | * i915_gpu_turbo_disable - disable graphics turbo | |
1849 | * | |
1850 | * Disable graphics turbo by resetting the max frequency and setting the | |
1851 | * current frequency to the default. | |
1852 | */ | |
1853 | bool i915_gpu_turbo_disable(void) | |
1854 | { | |
1855 | struct drm_i915_private *dev_priv; | |
1856 | bool ret = true; | |
1857 | ||
1858 | spin_lock(&mchdev_lock); | |
1859 | if (!i915_mch_dev) { | |
1860 | ret = false; | |
1861 | goto out_unlock; | |
1862 | } | |
1863 | dev_priv = i915_mch_dev; | |
1864 | ||
1865 | dev_priv->max_delay = dev_priv->fstart; | |
1866 | ||
1867 | if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) | |
1868 | ret = false; | |
1869 | ||
1870 | out_unlock: | |
1871 | spin_unlock(&mchdev_lock); | |
1872 | ||
1873 | return ret; | |
1874 | } | |
1875 | EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); | |
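/*
 * Editorial sketch (not part of this driver): an external consumer such as
 * a platform IPS driver could use the exported hooks above roughly like
 * this, where power_budget is a hypothetical value owned by that driver:
 *
 *	unsigned long val = i915_read_mch_val();
 *
 *	if (val > power_budget && i915_gpu_busy())
 *		i915_gpu_lower();		(trim the GPU frequency cap)
 *	else if (val < power_budget)
 *		i915_gpu_raise();		(headroom available, raise it)
 *
 * The real policy lives entirely outside this file.
 */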
1876 | ||
79e53945 JB |
1877 | /** |
1878 | * i915_driver_load - setup chip and create an initial config | |
1879 | * @dev: DRM device | |
1880 | * @flags: startup flags | |
1881 | * | |
1882 | * The driver load routine has to do several things: | |
1883 | * - drive output discovery via intel_modeset_init() | |
1884 | * - initialize the memory manager | |
1885 | * - allocate initial config memory | |
1886 | * - setup the DRM framebuffer with the allocated memory | |
1887 | */ | |
84b1fd10 | 1888 | int i915_driver_load(struct drm_device *dev, unsigned long flags) |
22eae947 | 1889 | { |
ea059a1e | 1890 | struct drm_i915_private *dev_priv; |
d883f7f1 | 1891 | resource_size_t base, size; |
cfdf1fa2 | 1892 | int ret = 0, mmio_bar; |
ac622a9c | 1893 | uint32_t agp_size, prealloc_size; |
22eae947 DA |
1894 | /* i915 has 4 more counters */ |
1895 | dev->counters += 4; | |
1896 | dev->types[6] = _DRM_STAT_IRQ; | |
1897 | dev->types[7] = _DRM_STAT_PRIMARY; | |
1898 | dev->types[8] = _DRM_STAT_SECONDARY; | |
1899 | dev->types[9] = _DRM_STAT_DMA; | |
1900 | ||
9a298b2a | 1901 | dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); |
ba8bbcf6 JB |
1902 | if (dev_priv == NULL) |
1903 | return -ENOMEM; | |
1904 | ||
ba8bbcf6 | 1905 | dev->dev_private = (void *)dev_priv; |
673a394b | 1906 | dev_priv->dev = dev; |
cfdf1fa2 | 1907 | dev_priv->info = (struct intel_device_info *) flags; |
ba8bbcf6 JB |
1908 | |
1909 | /* Add register map (needed for suspend/resume) */ | |
a6c45cf0 | 1910 | mmio_bar = IS_GEN2(dev) ? 1 : 0; |
01d73a69 JC |
1911 | base = pci_resource_start(dev->pdev, mmio_bar); |
1912 | size = pci_resource_len(dev->pdev, mmio_bar); | |
ba8bbcf6 | 1913 | |
ec2a4c3f DA |
1914 | if (i915_get_bridge_dev(dev)) { |
1915 | ret = -EIO; | |
1916 | goto free_priv; | |
1917 | } | |
1918 | ||
9f82d238 DV |
1919 | /* overlay on gen2 is broken and can't address above 1G */ |
1920 | if (IS_GEN2(dev)) | |
1921 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); | |
1922 | ||
3043c60c | 1923 | dev_priv->regs = ioremap(base, size); |
79e53945 JB |
1924 | if (!dev_priv->regs) { |
1925 | DRM_ERROR("failed to map registers\n"); | |
1926 | ret = -EIO; | |
ec2a4c3f | 1927 | goto put_bridge; |
79e53945 | 1928 | } |
ed4cb414 | 1929 | |
ab657db1 EA |
1930 | dev_priv->mm.gtt_mapping = |
1931 | io_mapping_create_wc(dev->agp->base, | |
1932 | dev->agp->agp_info.aper_size * 1024*1024); | |
6644107d VP |
1933 | if (dev_priv->mm.gtt_mapping == NULL) { |
1934 | ret = -EIO; | |
1935 | goto out_rmmap; | |
1936 | } | |
1937 | ||
ab657db1 EA |
1938 | /* Set up a WC MTRR for non-PAT systems. This is more common than |
1939 | * one would think, because the kernel disables PAT on first | |
1940 | * generation Core chips, where WC PAT would get overridden by a UC |
1941 | * MTRR if one were present (PAT stays off even with no UC MTRR). |
1942 | */ | |
1943 | dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, | |
1944 | dev->agp->agp_info.aper_size * | |
1945 | 1024 * 1024, | |
1946 | MTRR_TYPE_WRCOMB, 1); | |
1947 | if (dev_priv->mm.gtt_mtrr < 0) { | |
040aefa2 | 1948 | DRM_INFO("MTRR allocation failed. Graphics " |
ab657db1 EA |
1949 | "performance may suffer.\n"); |
1950 | } | |
1951 | ||
19966754 DV |
1952 | dev_priv->mm.gtt = intel_gtt_get(); |
1953 | if (!dev_priv->mm.gtt) { | |
1954 | DRM_ERROR("Failed to initialize GTT\n"); | |
1955 | ret = -ENODEV; | |
2a34f5e6 | 1956 | goto out_iomapfree; |
d1d6ca73 JB |
1957 | } |
1958 | ||
19966754 DV |
1959 | prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT; |
1960 | agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | |
1961 | ||
e642abbf CW |
1962 | /* The i915 workqueue is primarily used for batched retirement of |
1963 | * requests (and thus managing bo) once the task has been completed | |
1964 | * by the GPU. i915_gem_retire_requests() is called directly when we | |
1965 | * need high-priority retirement, such as waiting for an explicit | |
1966 | * bo. | |
1967 | * | |
1968 | * It is also used for periodic low-priority events, such as | |
1969 | * idle-timers and hangcheck. | |
1970 | * | |
1971 | * All tasks on the workqueue are expected to acquire the dev mutex | |
1972 | * so there is no point in running more than one instance of the | |
1973 | * workqueue at any time: max_active = 1 and NON_REENTRANT. | |
1974 | */ | |
1975 | dev_priv->wq = alloc_workqueue("i915", | |
1976 | WQ_UNBOUND | WQ_NON_REENTRANT, | |
1977 | 1); | |
9c9fe1f8 EA |
1978 | if (dev_priv->wq == NULL) { |
1979 | DRM_ERROR("Failed to create our workqueue.\n"); | |
1980 | ret = -ENOMEM; | |
1981 | goto out_iomapfree; | |
1982 | } | |
1983 | ||
ac5c4e76 DA |
1984 | /* enable GEM by default */ |
1985 | dev_priv->has_gem = 1; | |
ac5c4e76 | 1986 | |
2a34f5e6 EA |
1987 | if (prealloc_size > agp_size * 3 / 4) { |
1988 | DRM_ERROR("Detected broken video BIOS with %d/%dkB of video " | |
1989 | "memory stolen.\n", | |
1990 | prealloc_size / 1024, agp_size / 1024); | |
1991 | DRM_ERROR("Disabling GEM. (try reducing stolen memory or " | |
1992 | "updating the BIOS to fix).\n"); | |
1993 | dev_priv->has_gem = 0; | |
1994 | } | |
1995 | ||
79a78dd6 CW |
1996 | if (dev_priv->has_gem == 0 && |
1997 | drm_core_check_feature(dev, DRIVER_MODESET)) { | |
1998 | DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n"); | |
1999 | ret = -ENODEV; | |
2000 | goto out_iomapfree; | |
2001 | } | |
2002 | ||
9880b7a5 | 2003 | dev->driver->get_vblank_counter = i915_get_vblank_counter; |
42c2798b | 2004 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ |
f00a3ddf | 2005 | if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { |
42c2798b | 2006 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ |
9880b7a5 | 2007 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; |
42c2798b | 2008 | } |
9880b7a5 | 2009 | |
c4804411 ZW |
2010 | /* Try to make sure MCHBAR is enabled before poking at it */ |
2011 | intel_setup_mchbar(dev); | |
f899fc64 | 2012 | intel_setup_gmbus(dev); |
44834a67 | 2013 | intel_opregion_setup(dev); |
c4804411 | 2014 | |
6d139a87 BF |
2015 | /* Make sure the bios did its job and set up vital registers */ |
2016 | intel_setup_bios(dev); | |
2017 | ||
673a394b EA |
2018 | i915_gem_load(dev); |
2019 | ||
398c9cb2 KP |
2020 | /* Init HWS */ |
2021 | if (!I915_NEED_GFX_HWS(dev)) { | |
2022 | ret = i915_init_phys_hws(dev); | |
2023 | if (ret != 0) | |
9c9fe1f8 | 2024 | goto out_workqueue_free; |
398c9cb2 | 2025 | } |
ed4cb414 | 2026 | |
7648fa99 JB |
2027 | if (IS_PINEVIEW(dev)) |
2028 | i915_pineview_get_mem_freq(dev); | |
f00a3ddf | 2029 | else if (IS_GEN5(dev)) |
7648fa99 | 2030 | i915_ironlake_get_mem_freq(dev); |
7662c8bd | 2031 | |
ed4cb414 EA |
2032 | /* On the 945G/GM, the chipset reports the MSI capability on the |
2033 | * integrated graphics even though the support isn't actually there | |
2034 | * according to the published specs. It doesn't appear to function | |
2035 | * correctly in testing on 945G. | |
2036 | * This may be a side effect of MSI having been made available for PEG | |
2037 | * and the registers being closely associated. | |
d1ed629f KP |
2038 | * |
2039 | * According to chipset errata, on the 965GM, MSI interrupts may | |
b60678a7 KP |
2040 | * be lost or delayed, but we use them anyway to avoid |
2041 | * stuck interrupts on some machines. | |
ed4cb414 | 2042 | */ |
b60678a7 | 2043 | if (!IS_I945G(dev) && !IS_I945GM(dev)) |
d3e74d02 | 2044 | pci_enable_msi(dev->pdev); |
ed4cb414 EA |
2045 | |
2046 | spin_lock_init(&dev_priv->user_irq_lock); | |
63eeaf38 | 2047 | spin_lock_init(&dev_priv->error_lock); |
9d34e5db | 2048 | dev_priv->trace_irq_seqno = 0; |
ed4cb414 | 2049 | |
52440211 KP |
2050 | ret = drm_vblank_init(dev, I915_NUM_PIPE); |
2051 | ||
2052 | if (ret) { | |
2053 | (void) i915_driver_unload(dev); | |
2054 | return ret; | |
2055 | } | |
2056 | ||
11ed50ec BG |
2057 | /* Start out suspended */ |
2058 | dev_priv->mm.suspended = 1; | |
2059 | ||
3bad0781 ZW |
2060 | intel_detect_pch(dev); |
2061 | ||
79e53945 | 2062 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
53984635 | 2063 | ret = i915_load_modeset_init(dev); |
79e53945 JB |
2064 | if (ret < 0) { |
2065 | DRM_ERROR("failed to init modeset\n"); | |
9c9fe1f8 | 2066 | goto out_workqueue_free; |
79e53945 JB |
2067 | } |
2068 | } | |
2069 | ||
74a365b3 | 2070 | /* Must be done after probing outputs */ |
44834a67 CW |
2071 | intel_opregion_init(dev); |
2072 | acpi_video_register(); | |
74a365b3 | 2073 | |
f65d9421 BG |
2074 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, |
2075 | (unsigned long) dev); | |
7648fa99 JB |
2076 | |
2077 | spin_lock(&mchdev_lock); | |
2078 | i915_mch_dev = dev_priv; | |
2079 | dev_priv->mchdev_lock = &mchdev_lock; | |
2080 | spin_unlock(&mchdev_lock); | |
2081 | ||
79e53945 JB |
2082 | return 0; |
2083 | ||
9c9fe1f8 EA |
2084 | out_workqueue_free: |
2085 | destroy_workqueue(dev_priv->wq); | |
6644107d VP |
2086 | out_iomapfree: |
2087 | io_mapping_free(dev_priv->mm.gtt_mapping); | |
79e53945 JB |
2088 | out_rmmap: |
2089 | iounmap(dev_priv->regs); | |
ec2a4c3f DA |
2090 | put_bridge: |
2091 | pci_dev_put(dev_priv->bridge_dev); | |
79e53945 | 2092 | free_priv: |
9a298b2a | 2093 | kfree(dev_priv); |
ba8bbcf6 JB |
2094 | return ret; |
2095 | } | |
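/*
 * Editorial note: the error labels above unwind in the reverse order of
 * setup -- workqueue, GTT io mapping, register mapping, bridge device
 * reference, and finally the dev_priv allocation -- so any new resource
 * acquired in i915_driver_load() needs a matching label inserted at the
 * corresponding point in that chain.
 */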
2096 | ||
2097 | int i915_driver_unload(struct drm_device *dev) | |
2098 | { | |
2099 | struct drm_i915_private *dev_priv = dev->dev_private; | |
c911fc1c | 2100 | int ret; |
ba8bbcf6 | 2101 | |
7648fa99 JB |
2102 | spin_lock(&mchdev_lock); |
2103 | i915_mch_dev = NULL; | |
2104 | spin_unlock(&mchdev_lock); | |
2105 | ||
c911fc1c DV |
2106 | mutex_lock(&dev->struct_mutex); |
2107 | ret = i915_gpu_idle(dev); | |
2108 | if (ret) | |
2109 | DRM_ERROR("failed to idle hardware: %d\n", ret); | |
2110 | mutex_unlock(&dev->struct_mutex); | |
2111 | ||
75ef9da2 DV |
2112 | /* Cancel the retire work handler, which should be idle now. */ |
2113 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); | |
2114 | ||
ab657db1 EA |
2115 | io_mapping_free(dev_priv->mm.gtt_mapping); |
2116 | if (dev_priv->mm.gtt_mtrr >= 0) { | |
2117 | mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, | |
2118 | dev->agp->agp_info.aper_size * 1024 * 1024); | |
2119 | dev_priv->mm.gtt_mtrr = -1; | |
2120 | } | |
2121 | ||
44834a67 CW |
2122 | acpi_video_unregister(); |
2123 | ||
79e53945 | 2124 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
7b4f3990 | 2125 | intel_fbdev_fini(dev); |
3d8620cc JB |
2126 | intel_modeset_cleanup(dev); |
2127 | ||
6363ee6f ZY |
2128 | /* |
2129 | * free the memory space allocated for the child device | |
2130 | * config parsed from VBT | |
2131 | */ | |
2132 | if (dev_priv->child_dev && dev_priv->child_dev_num) { | |
2133 | kfree(dev_priv->child_dev); | |
2134 | dev_priv->child_dev = NULL; | |
2135 | dev_priv->child_dev_num = 0; | |
2136 | } | |
6c0d9350 | 2137 | |
6a9ee8af | 2138 | vga_switcheroo_unregister_client(dev->pdev); |
28d52043 | 2139 | vga_client_register(dev->pdev, NULL, NULL, NULL); |
79e53945 JB |
2140 | } |
2141 | ||
a8b4899e | 2142 | /* Free error state after interrupts are fully disabled. */ |
bc0c7f14 DV |
2143 | del_timer_sync(&dev_priv->hangcheck_timer); |
2144 | cancel_work_sync(&dev_priv->error_work); | |
a8b4899e | 2145 | i915_destroy_error_state(dev); |
bc0c7f14 | 2146 | |
ed4cb414 EA |
2147 | if (dev->pdev->msi_enabled) |
2148 | pci_disable_msi(dev->pdev); | |
2149 | ||
44834a67 | 2150 | intel_opregion_fini(dev); |
8ee1c3db | 2151 | |
79e53945 | 2152 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
67e77c5a DV |
2153 | /* Flush any outstanding unpin_work. */ |
2154 | flush_workqueue(dev_priv->wq); | |
2155 | ||
71acb5eb DA |
2156 | i915_gem_free_all_phys_object(dev); |
2157 | ||
79e53945 JB |
2158 | mutex_lock(&dev->struct_mutex); |
2159 | i915_gem_cleanup_ringbuffer(dev); | |
2160 | mutex_unlock(&dev->struct_mutex); | |
20bf377e JB |
2161 | if (I915_HAS_FBC(dev) && i915_powersave) |
2162 | i915_cleanup_compression(dev); | |
19966754 | 2163 | drm_mm_takedown(&dev_priv->mm.vram); |
02e792fb DV |
2164 | |
2165 | intel_cleanup_overlay(dev); | |
c2873e96 KP |
2166 | |
2167 | if (!I915_NEED_GFX_HWS(dev)) | |
2168 | i915_free_hws(dev); | |
79e53945 JB |
2169 | } |
2170 | ||
701394cc DV |
2171 | if (dev_priv->regs != NULL) |
2172 | iounmap(dev_priv->regs); | |
2173 | ||
f899fc64 | 2174 | intel_teardown_gmbus(dev); |
c4804411 ZW |
2175 | intel_teardown_mchbar(dev); |
2176 | ||
bc0c7f14 DV |
2177 | destroy_workqueue(dev_priv->wq); |
2178 | ||
ec2a4c3f | 2179 | pci_dev_put(dev_priv->bridge_dev); |
9a298b2a | 2180 | kfree(dev->dev_private); |
ba8bbcf6 | 2181 | |
22eae947 DA |
2182 | return 0; |
2183 | } | |
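/*
 * Editorial note: unload mirrors load in reverse -- deregister from IPS,
 * idle the GPU, cancel deferred work, drop the GTT mapping and MTRR, tear
 * down modeset/GEM state, disable MSI, undo opregion/gmbus/MCHBAR setup,
 * destroy the workqueue, release the bridge device and free dev_priv.
 */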
2184 | ||
f787a5f5 | 2185 | int i915_driver_open(struct drm_device *dev, struct drm_file *file) |
673a394b | 2186 | { |
f787a5f5 | 2187 | struct drm_i915_file_private *file_priv; |
673a394b | 2188 | |
8a4c47f3 | 2189 | DRM_DEBUG_DRIVER("\n"); |
f787a5f5 CW |
2190 | file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); |
2191 | if (!file_priv) | |
673a394b EA |
2192 | return -ENOMEM; |
2193 | ||
f787a5f5 | 2194 | file->driver_priv = file_priv; |
673a394b | 2195 | |
1c25595f | 2196 | spin_lock_init(&file_priv->mm.lock); |
f787a5f5 | 2197 | INIT_LIST_HEAD(&file_priv->mm.request_list); |
673a394b EA |
2198 | |
2199 | return 0; | |
2200 | } | |
2201 | ||
79e53945 JB |
2202 | /** |
2203 | * i915_driver_lastclose - clean up after all DRM clients have exited | |
2204 | * @dev: DRM device | |
2205 | * | |
2206 | * Take care of cleaning up after all DRM clients have exited. In the | |
2207 | * mode setting case, we want to restore the kernel's initial mode (just | |
2208 | * in case the last client left us in a bad state). | |
2209 | * | |
2210 | * Additionally, in the non-mode setting case, we'll tear down the AGP | |
2211 | * and DMA structures, since the kernel won't be using them, and clean | |
2212 | * up any GEM state. | |
2213 | */ | |
84b1fd10 | 2214 | void i915_driver_lastclose(struct drm_device * dev) |
1da177e4 | 2215 | { |
ba8bbcf6 JB |
2216 | drm_i915_private_t *dev_priv = dev->dev_private; |
2217 | ||
79e53945 | 2218 | if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { |
785b93ef | 2219 | drm_fb_helper_restore(); |
6a9ee8af | 2220 | vga_switcheroo_process_delayed_switch(); |
144a75fa | 2221 | return; |
79e53945 | 2222 | } |
144a75fa | 2223 | |
673a394b EA |
2224 | i915_gem_lastclose(dev); |
2225 | ||
ba8bbcf6 | 2226 | if (dev_priv->agp_heap) |
b5e89ed5 | 2227 | i915_mem_takedown(&(dev_priv->agp_heap)); |
ba8bbcf6 | 2228 | |
b5e89ed5 | 2229 | i915_dma_cleanup(dev); |
1da177e4 LT |
2230 | } |
2231 | ||
6c340eac | 2232 | void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) |
1da177e4 | 2233 | { |
ba8bbcf6 | 2234 | drm_i915_private_t *dev_priv = dev->dev_private; |
b962442e | 2235 | i915_gem_release(dev, file_priv); |
79e53945 JB |
2236 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
2237 | i915_mem_release(dev, file_priv, dev_priv->agp_heap); | |
1da177e4 LT |
2238 | } |
2239 | ||
f787a5f5 | 2240 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) |
673a394b | 2241 | { |
f787a5f5 | 2242 | struct drm_i915_file_private *file_priv = file->driver_priv; |
673a394b | 2243 | |
f787a5f5 | 2244 | kfree(file_priv); |
673a394b EA |
2245 | } |
2246 | ||
c153f45f | 2247 | struct drm_ioctl_desc i915_ioctls[] = { |
1b2f1489 DA |
2248 | DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2249 | DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), | |
2250 | DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), | |
2251 | DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), | |
2252 | DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), | |
2253 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), | |
2254 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), | |
2255 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2256 | DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH), | |
2257 | DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH), | |
2258 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2259 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), | |
2260 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2261 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2262 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), | |
2263 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), | |
2264 | DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
2265 | DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | |
2266 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), | |
2267 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), | |
2268 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), | |
2269 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), | |
2270 | DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), | |
2271 | DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), | |
2272 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | |
2273 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | |
2274 | DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), | |
2275 | DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), | |
2276 | DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), | |
2277 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), | |
2278 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), | |
2279 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), | |
2280 | DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), | |
2281 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), | |
2282 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), | |
2283 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), | |
2284 | DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), | |
2285 | DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), | |
2286 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | |
2287 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | |
c94f7029 DA |
2288 | }; |
2289 | ||
2290 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); | |
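/*
 * Editorial sketch (userspace view, not kernel code): rows in the table
 * above are reached through the matching DRM_IOCTL_I915_* numbers.  A
 * libdrm client querying a parameter might do, for example:
 *
 *	struct drm_i915_getparam gp = { .param = I915_PARAM_CHIPSET_ID,
 *					.value = &devid };
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *
 * which dispatches to i915_getparam() via the I915_GETPARAM row above.
 */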
cda17380 DA |
2291 | |
2292 | /** | |
2293 | * Determine if the device really is AGP or not. | |
2294 | * | |
2295 | * All Intel graphics chipsets are treated as AGP, even if they are really | |
2296 | * PCI-e. | |
2297 | * | |
2298 | * \param dev The device to be tested. | |
2299 | * | |
2300 | * \returns | |
2301 | * A value of 1 is always returned to indicate every i9x5 is AGP. | |
2302 | */ | |
84b1fd10 | 2303 | int i915_driver_device_is_agp(struct drm_device * dev) |
cda17380 DA |
2304 | { |
2305 | return 1; | |
2306 | } |